1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
54
55 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 pa_zero(*data);
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
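
/* A minimal usage sketch (not from the original file): a backend that needs
 * per-port private data can ask pa_device_port_new() for "extra" trailing
 * bytes and address them just past the aligned struct. The struct
 * example_port_data and the offset arithmetic below are illustrative
 * assumptions, not part of this API. */
#if 0
struct example_port_data {
    int profile_index;            /* hypothetical backend bookkeeping */
};

static pa_device_port *example_make_port(void) {
    pa_device_port *p;
    struct example_port_data *d;

    p = pa_device_port_new("analog-output", "Analog Output", sizeof(struct example_port_data));
    p->priority = 100;

    /* The private data lives right behind the aligned pa_device_port */
    d = (struct example_port_data*) ((uint8_t*) p + PA_ALIGN(sizeof(pa_device_port)));
    d->profile_index = 0;

    return p;                     /* later released with pa_device_port_free() */
}
#endif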
149
150 /* Called from main context */
151 static void reset_callbacks(pa_sink *s) {
152 pa_assert(s);
153
154 s->set_state = NULL;
155 s->get_volume = NULL;
156 s->set_volume = NULL;
157 s->get_mute = NULL;
158 s->set_mute = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
161 s->set_port = NULL;
162 }
163
164 /* Called from main context */
165 pa_sink* pa_sink_new(
166 pa_core *core,
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
169
170 pa_sink *s;
171 const char *name;
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
174 const char *dn;
175 char *pt;
176
177 pa_assert(core);
178 pa_assert(data);
179 pa_assert(data->name);
180 pa_assert_ctl_context();
181
182 s = pa_msgobject_new(pa_sink);
183
184 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
185 pa_log_debug("Failed to register name %s.", data->name);
186 pa_xfree(s);
187 return NULL;
188 }
189
190 pa_sink_new_data_set_name(data, name);
191
192 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
193 pa_xfree(s);
194 pa_namereg_unregister(core, name);
195 return NULL;
196 }
197
198 /* FIXME, need to free s here on failure */
199
200 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
201 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
202
203 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
204
205 if (!data->channel_map_is_set)
206 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
207
208 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
209 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
210
211 if (!data->volume_is_set)
212 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
213
214 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
215 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
216
217 if (!data->muted_is_set)
218 data->muted = FALSE;
219
220 if (data->card)
221 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
222
223 pa_device_init_description(data->proplist);
224 pa_device_init_icon(data->proplist, TRUE);
225 pa_device_init_intended_roles(data->proplist);
226
227 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
228 pa_xfree(s);
229 pa_namereg_unregister(core, name);
230 return NULL;
231 }
232
233 s->parent.parent.free = sink_free;
234 s->parent.process_msg = pa_sink_process_msg;
235
236 s->core = core;
237 s->state = PA_SINK_INIT;
238 s->flags = flags;
239 s->priority = 0;
240 s->suspend_cause = 0;
241 s->name = pa_xstrdup(name);
242 s->proplist = pa_proplist_copy(data->proplist);
243 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
244 s->module = data->module;
245 s->card = data->card;
246
247 s->priority = pa_device_init_priority(s->proplist);
248
249 s->sample_spec = data->sample_spec;
250 s->channel_map = data->channel_map;
251
252 s->inputs = pa_idxset_new(NULL, NULL);
253 s->n_corked = 0;
254
255 s->reference_volume = s->real_volume = data->volume;
256 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
257 s->base_volume = PA_VOLUME_NORM;
258 s->n_volume_steps = PA_VOLUME_NORM+1;
259 s->muted = data->muted;
260 s->refresh_volume = s->refresh_muted = FALSE;
261
262 reset_callbacks(s);
263 s->userdata = NULL;
264
265 s->asyncmsgq = NULL;
266
267 /* As a minor optimization we just steal the list instead of
268 * copying it here */
269 s->ports = data->ports;
270 data->ports = NULL;
271
272 s->active_port = NULL;
273 s->save_port = FALSE;
274
275 if (data->active_port && s->ports)
276 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
277 s->save_port = data->save_port;
278
279 if (!s->active_port && s->ports) {
280 void *state;
281 pa_device_port *p;
282
283 PA_HASHMAP_FOREACH(p, s->ports, state)
284 if (!s->active_port || p->priority > s->active_port->priority)
285 s->active_port = p;
286 }
287
288 s->save_volume = data->save_volume;
289 s->save_muted = data->save_muted;
290
291 pa_silence_memchunk_get(
292 &core->silence_cache,
293 core->mempool,
294 &s->silence,
295 &s->sample_spec,
296 0);
297
298 s->thread_info.rtpoll = NULL;
299 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
300 s->thread_info.soft_volume = s->soft_volume;
301 s->thread_info.soft_muted = s->muted;
302 s->thread_info.state = s->state;
303 s->thread_info.rewind_nbytes = 0;
304 s->thread_info.rewind_requested = FALSE;
305 s->thread_info.max_rewind = 0;
306 s->thread_info.max_request = 0;
307 s->thread_info.requested_latency_valid = FALSE;
308 s->thread_info.requested_latency = 0;
309 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
310 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
311 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
312
313 /* FIXME: This should probably be moved to pa_sink_put() */
314 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
315
316 if (s->card)
317 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
318
319 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
320 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
321 s->index,
322 s->name,
323 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
324 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
325 pt);
326 pa_xfree(pt);
327
328 pa_source_new_data_init(&source_data);
329 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
330 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
331 source_data.name = pa_sprintf_malloc("%s.monitor", name);
332 source_data.driver = data->driver;
333 source_data.module = data->module;
334 source_data.card = data->card;
335
336 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
337 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
338 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
339
340 s->monitor_source = pa_source_new(core, &source_data,
341 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
342 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
343
344 pa_source_new_data_done(&source_data);
345
346 if (!s->monitor_source) {
347 pa_sink_unlink(s);
348 pa_sink_unref(s);
349 return NULL;
350 }
351
352 s->monitor_source->monitor_of = s;
353
354 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
355 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
356 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
357
358 return s;
359 }
360
361 /* Called from main context */
362 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
363 int ret;
364 pa_bool_t suspend_change;
365 pa_sink_state_t original_state;
366
367 pa_assert(s);
368 pa_assert_ctl_context();
369
370 if (s->state == state)
371 return 0;
372
373 original_state = s->state;
374
375 suspend_change =
376 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
377 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
378
379 if (s->set_state)
380 if ((ret = s->set_state(s, state)) < 0)
381 return ret;
382
383 if (s->asyncmsgq)
384 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
385
386 if (s->set_state)
387 s->set_state(s, original_state);
388
389 return ret;
390 }
391
392 s->state = state;
393
394 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
395 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
396 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
397 }
398
399 if (suspend_change) {
400 pa_sink_input *i;
401 uint32_t idx;
402
403 /* We're suspending or resuming, tell everyone about it */
404
405 PA_IDXSET_FOREACH(i, s->inputs, idx)
406 if (s->state == PA_SINK_SUSPENDED &&
407 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
408 pa_sink_input_kill(i);
409 else if (i->suspend)
410 i->suspend(i, state == PA_SINK_SUSPENDED);
411
412 if (s->monitor_source)
413 pa_source_sync_suspend(s->monitor_source);
414 }
415
416 return 0;
417 }
418
419 /* Called from main context */
420 void pa_sink_put(pa_sink* s) {
421 pa_sink_assert_ref(s);
422 pa_assert_ctl_context();
423
424 pa_assert(s->state == PA_SINK_INIT);
425
426 /* The following fields must be initialized properly when calling _put() */
427 pa_assert(s->asyncmsgq);
428 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
429
430 /* Generally, flags should be initialized via pa_sink_new(). As a
431 * special exception we allow volume related flags to be set
432 * between _new() and _put(). */
433
434 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
435 s->flags |= PA_SINK_DECIBEL_VOLUME;
436
437 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
438 s->flags |= PA_SINK_FLAT_VOLUME;
439
440 /* We assume that if the sink implementor changed the default
441 * volume he did so in real_volume, because that is the usual
442 * place where he is supposed to place his changes. */
443 s->reference_volume = s->real_volume;
444
445 s->thread_info.soft_volume = s->soft_volume;
446 s->thread_info.soft_muted = s->muted;
447
448 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
449 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
450 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
451 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
452 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
453
454 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
455 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
456 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
457
458 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
459
460 pa_source_put(s->monitor_source);
461
462 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
463 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
464 }
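
/* A condensed sketch (assumptions noted) of how a sink implementor typically
 * drives this API: fill in pa_sink_new_data, create the sink, wire up the
 * callbacks and the asyncmsgq between _new() and _put(), then call
 * pa_sink_put(). The module pointer "m", the queue "q" and the
 * example_* callback names are hypothetical. */
#if 0
static pa_sink *example_create_sink(pa_module *m, pa_asyncmsgq *q) {
    pa_sink_new_data data;
    pa_sink *s;
    pa_sample_spec ss;
    pa_channel_map map;

    ss.format = PA_SAMPLE_S16NE;
    ss.rate = 44100;
    ss.channels = 2;
    pa_channel_map_init_stereo(&map);

    pa_sink_new_data_init(&data);
    data.driver = __FILE__;
    data.module = m;
    pa_sink_new_data_set_name(&data, "example_sink");
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);

    s = pa_sink_new(m->core, &data, PA_SINK_LATENCY);
    pa_sink_new_data_done(&data);

    if (!s)
        return NULL;

    /* Between _new() and _put() the implementor fills in its callbacks */
    s->parent.process_msg = example_process_msg;   /* hypothetical */
    s->set_state = example_set_state;              /* hypothetical */
    s->userdata = m->userdata;

    pa_sink_set_asyncmsgq(s, q);

    pa_sink_put(s);
    return s;
}
#endif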
465
466 /* Called from main context */
467 void pa_sink_unlink(pa_sink* s) {
468 pa_bool_t linked;
469 pa_sink_input *i, *j = NULL;
470
471 pa_assert(s);
472 pa_assert_ctl_context();
473
474 /* Please note that pa_sink_unlink() does more than simply
475 * reversing pa_sink_put(). It also undoes the registrations
476 * already done in pa_sink_new()! */
477
478 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
479 * may be called multiple times on the same sink without bad
480 * effects. */
481
482 linked = PA_SINK_IS_LINKED(s->state);
483
484 if (linked)
485 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
486
487 if (s->state != PA_SINK_UNLINKED)
488 pa_namereg_unregister(s->core, s->name);
489 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
490
491 if (s->card)
492 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
493
494 while ((i = pa_idxset_first(s->inputs, NULL))) {
495 pa_assert(i != j);
496 pa_sink_input_kill(i);
497 j = i;
498 }
499
500 if (linked)
501 sink_set_state(s, PA_SINK_UNLINKED);
502 else
503 s->state = PA_SINK_UNLINKED;
504
505 reset_callbacks(s);
506
507 if (s->monitor_source)
508 pa_source_unlink(s->monitor_source);
509
510 if (linked) {
511 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
512 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
513 }
514 }
515
516 /* Called from main context */
517 static void sink_free(pa_object *o) {
518 pa_sink *s = PA_SINK(o);
519 pa_sink_input *i;
520
521 pa_assert(s);
522 pa_assert_ctl_context();
523 pa_assert(pa_sink_refcnt(s) == 0);
524
525 if (PA_SINK_IS_LINKED(s->state))
526 pa_sink_unlink(s);
527
528 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
529
530 if (s->monitor_source) {
531 pa_source_unref(s->monitor_source);
532 s->monitor_source = NULL;
533 }
534
535 pa_idxset_free(s->inputs, NULL, NULL);
536
537 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
538 pa_sink_input_unref(i);
539
540 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
541
542 if (s->silence.memblock)
543 pa_memblock_unref(s->silence.memblock);
544
545 pa_xfree(s->name);
546 pa_xfree(s->driver);
547
548 if (s->proplist)
549 pa_proplist_free(s->proplist);
550
551 if (s->ports) {
552 pa_device_port *p;
553
554 while ((p = pa_hashmap_steal_first(s->ports)))
555 pa_device_port_free(p);
556
557 pa_hashmap_free(s->ports, NULL, NULL);
558 }
559
560 pa_xfree(s);
561 }
562
563 /* Called from main context, and not while the IO thread is active, please */
564 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
565 pa_sink_assert_ref(s);
566 pa_assert_ctl_context();
567
568 s->asyncmsgq = q;
569
570 if (s->monitor_source)
571 pa_source_set_asyncmsgq(s->monitor_source, q);
572 }
573
574 /* Called from main context, and not while the IO thread is active, please */
575 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
576 pa_sink_assert_ref(s);
577 pa_assert_ctl_context();
578
579 if (mask == 0)
580 return;
581
582 /* For now, allow only a minimal set of flags to be changed. */
583 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
584
585 s->flags = (s->flags & ~mask) | (value & mask);
586
587 pa_source_update_flags(s->monitor_source,
588 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
589 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
590 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
591 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
592 }
593
594 /* Called from IO context, or before _put() from main context */
595 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
596 pa_sink_assert_ref(s);
597 pa_sink_assert_io_context(s);
598
599 s->thread_info.rtpoll = p;
600
601 if (s->monitor_source)
602 pa_source_set_rtpoll(s->monitor_source, p);
603 }
604
605 /* Called from main context */
606 int pa_sink_update_status(pa_sink*s) {
607 pa_sink_assert_ref(s);
608 pa_assert_ctl_context();
609 pa_assert(PA_SINK_IS_LINKED(s->state));
610
611 if (s->state == PA_SINK_SUSPENDED)
612 return 0;
613
614 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
615 }
616
617 /* Called from main context */
618 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
619 pa_sink_assert_ref(s);
620 pa_assert_ctl_context();
621 pa_assert(PA_SINK_IS_LINKED(s->state));
622 pa_assert(cause != 0);
623
624 if (suspend) {
625 s->suspend_cause |= cause;
626 s->monitor_source->suspend_cause |= cause;
627 } else {
628 s->suspend_cause &= ~cause;
629 s->monitor_source->suspend_cause &= ~cause;
630 }
631
632 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
633 return 0;
634
635 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
636
637 if (s->suspend_cause)
638 return sink_set_state(s, PA_SINK_SUSPENDED);
639 else
640 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
641 }
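
/* A short usage sketch (assumptions noted): suspend causes accumulate as a
 * bitmask, so a sink only resumes once every cause has been cleared again.
 * PA_SUSPEND_IDLE and PA_SUSPEND_USER are existing cause bits; the sink
 * pointer "s" is assumed to be linked. */
#if 0
static void example_suspend_dance(pa_sink *s) {
    pa_sink_suspend(s, TRUE, PA_SUSPEND_IDLE);   /* suspended */
    pa_sink_suspend(s, TRUE, PA_SUSPEND_USER);   /* still suspended, two causes set */
    pa_sink_suspend(s, FALSE, PA_SUSPEND_IDLE);  /* still suspended, USER remains */
    pa_sink_suspend(s, FALSE, PA_SUSPEND_USER);  /* resumes (RUNNING or IDLE) */
}
#endif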
642
643 /* Called from main context */
644 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
645 pa_sink_input *i, *n;
646 uint32_t idx;
647
648 pa_sink_assert_ref(s);
649 pa_assert_ctl_context();
650 pa_assert(PA_SINK_IS_LINKED(s->state));
651
652 if (!q)
653 q = pa_queue_new();
654
655 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
656 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
657
658 pa_sink_input_ref(i);
659
660 if (pa_sink_input_start_move(i) >= 0)
661 pa_queue_push(q, i);
662 else
663 pa_sink_input_unref(i);
664 }
665
666 return q;
667 }
668
669 /* Called from main context */
670 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
671 pa_sink_input *i;
672
673 pa_sink_assert_ref(s);
674 pa_assert_ctl_context();
675 pa_assert(PA_SINK_IS_LINKED(s->state));
676 pa_assert(q);
677
678 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
679 if (pa_sink_input_finish_move(i, s, save) < 0)
680 pa_sink_input_fail_move(i);
681
682 pa_sink_input_unref(i);
683 }
684
685 pa_queue_free(q, NULL, NULL);
686 }
687
688 /* Called from main context */
689 void pa_sink_move_all_fail(pa_queue *q) {
690 pa_sink_input *i;
691
692 pa_assert_ctl_context();
693 pa_assert(q);
694
695 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
696 pa_sink_input_fail_move(i);
697 pa_sink_input_unref(i);
698 }
699
700 pa_queue_free(q, NULL, NULL);
701 }
702
703 /* Called from IO thread context */
704 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
705 pa_sink_input *i;
706 void *state = NULL;
707
708 pa_sink_assert_ref(s);
709 pa_sink_assert_io_context(s);
710 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
711
712 /* If nobody requested a rewind and this is not actually a real rewind,
713 * then we can short-cut this. Please note that this means that
714 * not all rewind requests triggered upstream will always be
715 * translated into actual requests! */
716 if (!s->thread_info.rewind_requested && nbytes <= 0)
717 return;
718
719 s->thread_info.rewind_nbytes = 0;
720 s->thread_info.rewind_requested = FALSE;
721
722 if (s->thread_info.state == PA_SINK_SUSPENDED)
723 return;
724
725 if (nbytes > 0)
726 pa_log_debug("Processing rewind...");
727
728 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
729 pa_sink_input_assert_ref(i);
730 pa_sink_input_process_rewind(i, nbytes);
731 }
732
733 if (nbytes > 0)
734 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
735 pa_source_process_rewind(s->monitor_source, nbytes);
736 }
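
/* An illustrative fragment (not from the original file) of how a sink's IO
 * loop typically honours a rewind request before rendering: the requested
 * amount is clamped to what the device buffer can still take back.
 * "writable" and example_handle_rewind() are hypothetical. */
#if 0
static void example_handle_rewind(pa_sink *s, size_t writable) {
    if (s->thread_info.rewind_requested) {
        size_t rewind_nbytes = s->thread_info.rewind_nbytes;

        /* Never rewind more than we could still rewrite into the device buffer */
        if (rewind_nbytes > writable)
            rewind_nbytes = writable;

        pa_sink_process_rewind(s, rewind_nbytes);
    }
}
#endif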
737
738 /* Called from IO thread context */
739 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
740 pa_sink_input *i;
741 unsigned n = 0;
742 void *state = NULL;
743 size_t mixlength = *length;
744
745 pa_sink_assert_ref(s);
746 pa_sink_assert_io_context(s);
747 pa_assert(info);
748
749 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
750 pa_sink_input_assert_ref(i);
751
752 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
753
754 if (mixlength == 0 || info->chunk.length < mixlength)
755 mixlength = info->chunk.length;
756
757 if (pa_memblock_is_silence(info->chunk.memblock)) {
758 pa_memblock_unref(info->chunk.memblock);
759 continue;
760 }
761
762 info->userdata = pa_sink_input_ref(i);
763
764 pa_assert(info->chunk.memblock);
765 pa_assert(info->chunk.length > 0);
766
767 info++;
768 n++;
769 maxinfo--;
770 }
771
772 if (mixlength > 0)
773 *length = mixlength;
774
775 return n;
776 }
777
778 /* Called from IO thread context */
779 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
780 pa_sink_input *i;
781 void *state;
782 unsigned p = 0;
783 unsigned n_unreffed = 0;
784
785 pa_sink_assert_ref(s);
786 pa_sink_assert_io_context(s);
787 pa_assert(result);
788 pa_assert(result->memblock);
789 pa_assert(result->length > 0);
790
791 /* We optimize for the case where the order of the inputs has not changed */
792
793 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
794 unsigned j;
795 pa_mix_info* m = NULL;
796
797 pa_sink_input_assert_ref(i);
798
799 /* Let's try to find the matching entry in the pa_mix_info array */
800 for (j = 0; j < n; j ++) {
801
802 if (info[p].userdata == i) {
803 m = info + p;
804 break;
805 }
806
807 p++;
808 if (p >= n)
809 p = 0;
810 }
811
812 /* Drop read data */
813 pa_sink_input_drop(i, result->length);
814
815 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
816
817 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
818 void *ostate = NULL;
819 pa_source_output *o;
820 pa_memchunk c;
821
822 if (m && m->chunk.memblock) {
823 c = m->chunk;
824 pa_memblock_ref(c.memblock);
825 pa_assert(result->length <= c.length);
826 c.length = result->length;
827
828 pa_memchunk_make_writable(&c, 0);
829 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
830 } else {
831 c = s->silence;
832 pa_memblock_ref(c.memblock);
833 pa_assert(result->length <= c.length);
834 c.length = result->length;
835 }
836
837 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
838 pa_source_output_assert_ref(o);
839 pa_assert(o->direct_on_input == i);
840 pa_source_post_direct(s->monitor_source, o, &c);
841 }
842
843 pa_memblock_unref(c.memblock);
844 }
845 }
846
847 if (m) {
848 if (m->chunk.memblock)
849 pa_memblock_unref(m->chunk.memblock);
850 pa_memchunk_reset(&m->chunk);
851
852 pa_sink_input_unref(m->userdata);
853 m->userdata = NULL;
854
855 n_unreffed += 1;
856 }
857 }
858
859 /* Now drop references to entries that are included in the
860 * pa_mix_info array but don't exist anymore */
861
862 if (n_unreffed < n) {
863 for (; n > 0; info++, n--) {
864 if (info->userdata)
865 pa_sink_input_unref(info->userdata);
866 if (info->chunk.memblock)
867 pa_memblock_unref(info->chunk.memblock);
868 }
869 }
870
871 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
872 pa_source_post(s->monitor_source, result);
873 }
874
875 /* Called from IO thread context */
876 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
877 pa_mix_info info[MAX_MIX_CHANNELS];
878 unsigned n;
879 size_t block_size_max;
880
881 pa_sink_assert_ref(s);
882 pa_sink_assert_io_context(s);
883 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
884 pa_assert(pa_frame_aligned(length, &s->sample_spec));
885 pa_assert(result);
886
887 pa_assert(!s->thread_info.rewind_requested);
888 pa_assert(s->thread_info.rewind_nbytes == 0);
889
890 if (s->thread_info.state == PA_SINK_SUSPENDED) {
891 result->memblock = pa_memblock_ref(s->silence.memblock);
892 result->index = s->silence.index;
893 result->length = PA_MIN(s->silence.length, length);
894 return;
895 }
896
897 pa_sink_ref(s);
898
899 if (length <= 0)
900 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
901
902 block_size_max = pa_mempool_block_size_max(s->core->mempool);
903 if (length > block_size_max)
904 length = pa_frame_align(block_size_max, &s->sample_spec);
905
906 pa_assert(length > 0);
907
908 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
909
910 if (n == 0) {
911
912 *result = s->silence;
913 pa_memblock_ref(result->memblock);
914
915 if (result->length > length)
916 result->length = length;
917
918 } else if (n == 1) {
919 pa_cvolume volume;
920
921 *result = info[0].chunk;
922 pa_memblock_ref(result->memblock);
923
924 if (result->length > length)
925 result->length = length;
926
927 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
928
929 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
930 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
931 pa_memblock_unref(result->memblock);
932 pa_silence_memchunk_get(&s->core->silence_cache,
933 s->core->mempool,
934 result,
935 &s->sample_spec,
936 result->length);
937 } else {
938 pa_memchunk_make_writable(result, 0);
939 pa_volume_memchunk(result, &s->sample_spec, &volume);
940 }
941 }
942 } else {
943 void *ptr;
944 result->memblock = pa_memblock_new(s->core->mempool, length);
945
946 ptr = pa_memblock_acquire(result->memblock);
947 result->length = pa_mix(info, n,
948 ptr, length,
949 &s->sample_spec,
950 &s->thread_info.soft_volume,
951 s->thread_info.soft_muted);
952 pa_memblock_release(result->memblock);
953
954 result->index = 0;
955 }
956
957 inputs_drop(s, info, n, result);
958
959 pa_sink_unref(s);
960 }
961
962 /* Called from IO thread context */
963 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
964 pa_mix_info info[MAX_MIX_CHANNELS];
965 unsigned n;
966 size_t length, block_size_max;
967
968 pa_sink_assert_ref(s);
969 pa_sink_assert_io_context(s);
970 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
971 pa_assert(target);
972 pa_assert(target->memblock);
973 pa_assert(target->length > 0);
974 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
975
976 pa_assert(!s->thread_info.rewind_requested);
977 pa_assert(s->thread_info.rewind_nbytes == 0);
978
979 if (s->thread_info.state == PA_SINK_SUSPENDED) {
980 pa_silence_memchunk(target, &s->sample_spec);
981 return;
982 }
983
984 pa_sink_ref(s);
985
986 length = target->length;
987 block_size_max = pa_mempool_block_size_max(s->core->mempool);
988 if (length > block_size_max)
989 length = pa_frame_align(block_size_max, &s->sample_spec);
990
991 pa_assert(length > 0);
992
993 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
994
995 if (n == 0) {
996 if (target->length > length)
997 target->length = length;
998
999 pa_silence_memchunk(target, &s->sample_spec);
1000 } else if (n == 1) {
1001 pa_cvolume volume;
1002
1003 if (target->length > length)
1004 target->length = length;
1005
1006 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1007
1008 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1009 pa_silence_memchunk(target, &s->sample_spec);
1010 else {
1011 pa_memchunk vchunk;
1012
1013 vchunk = info[0].chunk;
1014 pa_memblock_ref(vchunk.memblock);
1015
1016 if (vchunk.length > length)
1017 vchunk.length = length;
1018
1019 if (!pa_cvolume_is_norm(&volume)) {
1020 pa_memchunk_make_writable(&vchunk, 0);
1021 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1022 }
1023
1024 pa_memchunk_memcpy(target, &vchunk);
1025 pa_memblock_unref(vchunk.memblock);
1026 }
1027
1028 } else {
1029 void *ptr;
1030
1031 ptr = pa_memblock_acquire(target->memblock);
1032
1033 target->length = pa_mix(info, n,
1034 (uint8_t*) ptr + target->index, length,
1035 &s->sample_spec,
1036 &s->thread_info.soft_volume,
1037 s->thread_info.soft_muted);
1038
1039 pa_memblock_release(target->memblock);
1040 }
1041
1042 inputs_drop(s, info, n, target);
1043
1044 pa_sink_unref(s);
1045 }
1046
1047 /* Called from IO thread context */
1048 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1049 pa_memchunk chunk;
1050 size_t l, d;
1051
1052 pa_sink_assert_ref(s);
1053 pa_sink_assert_io_context(s);
1054 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1055 pa_assert(target);
1056 pa_assert(target->memblock);
1057 pa_assert(target->length > 0);
1058 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1059
1060 pa_assert(!s->thread_info.rewind_requested);
1061 pa_assert(s->thread_info.rewind_nbytes == 0);
1062
1063 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1064 pa_silence_memchunk(target, &s->sample_spec);
1065 return;
1066 }
1067
1068 pa_sink_ref(s);
1069
1070 l = target->length;
1071 d = 0;
1072 while (l > 0) {
1073 chunk = *target;
1074 chunk.index += d;
1075 chunk.length -= d;
1076
1077 pa_sink_render_into(s, &chunk);
1078
1079 d += chunk.length;
1080 l -= chunk.length;
1081 }
1082
1083 pa_sink_unref(s);
1084 }
1085
1086 /* Called from IO thread context */
1087 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1088 pa_sink_assert_ref(s);
1089 pa_sink_assert_io_context(s);
1090 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1091 pa_assert(length > 0);
1092 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1093 pa_assert(result);
1094
1095 pa_assert(!s->thread_info.rewind_requested);
1096 pa_assert(s->thread_info.rewind_nbytes == 0);
1097
1098 pa_sink_ref(s);
1099
1100 pa_sink_render(s, length, result);
1101
1102 if (result->length < length) {
1103 pa_memchunk chunk;
1104
1105 pa_memchunk_make_writable(result, length);
1106
1107 chunk.memblock = result->memblock;
1108 chunk.index = result->index + result->length;
1109 chunk.length = length - result->length;
1110
1111 pa_sink_render_into_full(s, &chunk);
1112
1113 result->length = length;
1114 }
1115
1116 pa_sink_unref(s);
1117 }
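
/* A minimal IO-thread sketch (assuming a hypothetical example_write_frames()
 * that pushes PCM to the device): pa_sink_render_full() always yields exactly
 * the requested number of bytes, mixing or padding with silence as needed. */
#if 0
static void example_render_once(pa_sink *s, size_t nbytes) {
    pa_memchunk chunk;
    void *p;

    pa_sink_render_full(s, nbytes, &chunk);

    p = pa_memblock_acquire(chunk.memblock);
    example_write_frames(s->userdata, (uint8_t*) p + chunk.index, chunk.length); /* hypothetical */
    pa_memblock_release(chunk.memblock);

    pa_memblock_unref(chunk.memblock);
}
#endif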
1118
1119 /* Called from main thread */
1120 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1121 pa_usec_t usec = 0;
1122
1123 pa_sink_assert_ref(s);
1124 pa_assert_ctl_context();
1125 pa_assert(PA_SINK_IS_LINKED(s->state));
1126
1127 /* The returned value is supposed to be in the time domain of the sound card! */
1128
1129 if (s->state == PA_SINK_SUSPENDED)
1130 return 0;
1131
1132 if (!(s->flags & PA_SINK_LATENCY))
1133 return 0;
1134
1135 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1136
1137 return usec;
1138 }
1139
1140 /* Called from IO thread */
1141 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1142 pa_usec_t usec = 0;
1143 pa_msgobject *o;
1144
1145 pa_sink_assert_ref(s);
1146 pa_sink_assert_io_context(s);
1147 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1148
1149 /* The returned value is supposed to be in the time domain of the sound card! */
1150
1151 if (s->thread_info.state == PA_SINK_SUSPENDED)
1152 return 0;
1153
1154 if (!(s->flags & PA_SINK_LATENCY))
1155 return 0;
1156
1157 o = PA_MSGOBJECT(s);
1158
1159 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1160
1161 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1162 return -1;
1163
1164 return usec;
1165 }
1166
1167 /* Called from main context */
1168 static void compute_reference_ratios(pa_sink *s) {
1169 uint32_t idx;
1170 pa_sink_input *i;
1171
1172 pa_sink_assert_ref(s);
1173 pa_assert_ctl_context();
1174 pa_assert(PA_SINK_IS_LINKED(s->state));
1175 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1176
1177 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1178 unsigned c;
1179 pa_cvolume remapped;
1180
1181 /*
1182 * Calculates the stream's reference ratio from the sink's
1183 * reference volume. This basically calculates:
1184 *
1185 * i->reference_ratio = i->volume / s->reference_volume
1186 */
1187
1188 remapped = s->reference_volume;
1189 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1190
1191 i->reference_ratio.channels = i->sample_spec.channels;
1192
1193 for (c = 0; c < i->sample_spec.channels; c++) {
1194
1195 /* We don't update when the sink volume is 0 anyway */
1196 if (remapped.values[c] <= PA_VOLUME_MUTED)
1197 continue;
1198
1199 /* Don't update the reference ratio unless necessary */
1200 if (pa_sw_volume_multiply(
1201 i->reference_ratio.values[c],
1202 remapped.values[c]) == i->volume.values[c])
1203 continue;
1204
1205 i->reference_ratio.values[c] = pa_sw_volume_divide(
1206 i->volume.values[c],
1207 remapped.values[c]);
1208 }
1209 }
1210 }
1211
1212 /* Called from main context */
1213 static void compute_real_ratios(pa_sink *s) {
1214 pa_sink_input *i;
1215 uint32_t idx;
1216
1217 pa_sink_assert_ref(s);
1218 pa_assert_ctl_context();
1219 pa_assert(PA_SINK_IS_LINKED(s->state));
1220 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1221
1222 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1223 unsigned c;
1224 pa_cvolume remapped;
1225
1226 /*
1227 * This basically calculates:
1228 *
1229 * i->real_ratio := i->volume / s->real_volume
1230 * i->soft_volume := i->real_ratio * i->volume_factor
1231 */
1232
1233 remapped = s->real_volume;
1234 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1235
1236 i->real_ratio.channels = i->sample_spec.channels;
1237 i->soft_volume.channels = i->sample_spec.channels;
1238
1239 for (c = 0; c < i->sample_spec.channels; c++) {
1240
1241 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1242 /* We leave i->real_ratio untouched */
1243 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1244 continue;
1245 }
1246
1247 /* Don't lose accuracy unless necessary */
1248 if (pa_sw_volume_multiply(
1249 i->real_ratio.values[c],
1250 remapped.values[c]) != i->volume.values[c])
1251
1252 i->real_ratio.values[c] = pa_sw_volume_divide(
1253 i->volume.values[c],
1254 remapped.values[c]);
1255
1256 i->soft_volume.values[c] = pa_sw_volume_multiply(
1257 i->real_ratio.values[c],
1258 i->volume_factor.values[c]);
1259 }
1260
1261 /* We don't copy the soft_volume to the thread_info data
1262 * here. That must be done by the caller */
1263 }
1264 }
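
/* A per-channel sketch (illustrative only) of the arithmetic used above: the
 * real ratio is the stream volume divided by the (remapped) sink volume, and
 * the soft volume applied in the mixer is that ratio scaled by the stream's
 * volume_factor. All names here are local to the example. */
#if 0
static pa_volume_t example_soft_volume(pa_volume_t stream_volume,
                                       pa_volume_t sink_real_volume,
                                       pa_volume_t volume_factor) {
    pa_volume_t real_ratio;

    real_ratio = pa_sw_volume_divide(stream_volume, sink_real_volume);
    return pa_sw_volume_multiply(real_ratio, volume_factor);
}
#endif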
1265
1266 /* Called from main thread */
1267 static void compute_real_volume(pa_sink *s) {
1268 pa_sink_input *i;
1269 uint32_t idx;
1270
1271 pa_sink_assert_ref(s);
1272 pa_assert_ctl_context();
1273 pa_assert(PA_SINK_IS_LINKED(s->state));
1274 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1275
1276 /* This determines the maximum volume of all streams and sets
1277 * s->real_volume accordingly. */
1278
1279 if (pa_idxset_isempty(s->inputs)) {
1280 /* In the special case that we have no sink input we leave the
1281 * volume unmodified. */
1282 s->real_volume = s->reference_volume;
1283 return;
1284 }
1285
1286 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1287
1288 /* First let's determine the new maximum volume of all inputs
1289 * connected to this sink */
1290 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1291 pa_cvolume remapped;
1292
1293 remapped = i->volume;
1294 pa_cvolume_remap(&remapped, &i->channel_map, &s->channel_map);
1295 pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
1296 }
1297
1298 /* Then, let's update the real ratios/soft volumes of all inputs
1299 * connected to this sink */
1300 compute_real_ratios(s);
1301 }
1302
1303 /* Called from main thread */
1304 static void propagate_reference_volume(pa_sink *s) {
1305 pa_sink_input *i;
1306 uint32_t idx;
1307
1308 pa_sink_assert_ref(s);
1309 pa_assert_ctl_context();
1310 pa_assert(PA_SINK_IS_LINKED(s->state));
1311 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1312
1313 /* This is called whenever the sink volume changes that is not
1314 * caused by a sink input volume change. We need to fix up the
1315 * sink input volumes accordingly */
1316
1317 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1318 pa_cvolume old_volume, remapped;
1319
1320 old_volume = i->volume;
1321
1322 /* This basically calculates:
1323 *
1324 * i->volume := s->reference_volume * i->reference_ratio */
1325
1326 remapped = s->reference_volume;
1327 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1328 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1329
1330 /* The volume changed, let's tell people so */
1331 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1332
1333 if (i->volume_changed)
1334 i->volume_changed(i);
1335
1336 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1337 }
1338 }
1339 }
1340
1341 /* Called from main thread */
1342 void pa_sink_set_volume(
1343 pa_sink *s,
1344 const pa_cvolume *volume,
1345 pa_bool_t sendmsg,
1346 pa_bool_t save) {
1347
1348 pa_cvolume old_reference_volume;
1349 pa_bool_t reference_changed;
1350
1351 pa_sink_assert_ref(s);
1352 pa_assert_ctl_context();
1353 pa_assert(PA_SINK_IS_LINKED(s->state));
1354 pa_assert(!volume || pa_cvolume_valid(volume));
1355 pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));
1356 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1357
1358 /* As a special exception we accept mono volumes on all sinks --
1359 * even on those with more complex channel maps */
1360
1361 /* If volume is NULL we synchronize the sink's real and reference
1362 * volumes with the stream volumes. If it is not NULL we update
1363 * the reference_volume with it. */
1364
1365 old_reference_volume = s->reference_volume;
1366
1367 if (volume) {
1368
1369 if (pa_cvolume_compatible(volume, &s->sample_spec))
1370 s->reference_volume = *volume;
1371 else
1372 pa_cvolume_scale(&s->reference_volume, pa_cvolume_max(volume));
1373
1374 if (s->flags & PA_SINK_FLAT_VOLUME) {
1375 /* OK, propagate this volume change back to the inputs */
1376 propagate_reference_volume(s);
1377
1378 /* And now recalculate the real volume */
1379 compute_real_volume(s);
1380 } else
1381 s->real_volume = s->reference_volume;
1382
1383 } else {
1384 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1385
1386 /* Ok, let's determine the new real volume */
1387 compute_real_volume(s);
1388
1389 /* Let's 'push' the reference volume if necessary */
1390 pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);
1391
1392 /* We need to fix the reference ratios of all streams now that
1393 * we changed the reference volume */
1394 compute_reference_ratios(s);
1395 }
1396
1397 reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
1398 s->save_volume = (!reference_changed && s->save_volume) || save;
1399
1400 if (s->set_volume) {
1401 /* If we have a function set_volume(), then we do not apply a
1402 * soft volume by default. However, set_volume() is free to
1403 * apply one to s->soft_volume */
1404
1405 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1406 s->set_volume(s);
1407
1408 } else
1409 /* If we have no function set_volume(), then the soft volume
1410 * becomes the virtual volume */
1411 s->soft_volume = s->real_volume;
1412
1413 /* This tells the sink that soft and/or virtual volume changed */
1414 if (sendmsg)
1415 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1416
1417 if (reference_changed)
1418 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1419 }
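
/* A short caller-side sketch (assumptions noted): setting a sink to half of
 * PA_VOLUME_NORM on all channels, asking for the change to be propagated to
 * the IO thread and to be saved for restoration. The sink pointer "s" is
 * assumed to be linked. */
#if 0
static void example_set_half_volume(pa_sink *s) {
    pa_cvolume cv;

    pa_cvolume_set(&cv, s->sample_spec.channels, PA_VOLUME_NORM / 2);
    pa_sink_set_volume(s, &cv, TRUE, TRUE);
}
#endif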
1420
1421 /* Called from main thread. Only to be called by sink implementor */
1422 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1423 pa_sink_assert_ref(s);
1424 pa_assert_ctl_context();
1425
1426 if (!volume)
1427 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1428 else
1429 s->soft_volume = *volume;
1430
1431 if (PA_SINK_IS_LINKED(s->state))
1432 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1433 else
1434 s->thread_info.soft_volume = s->soft_volume;
1435 }
1436
1437 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
1438 pa_sink_input *i;
1439 uint32_t idx;
1440 pa_cvolume old_reference_volume;
1441
1442 pa_sink_assert_ref(s);
1443 pa_assert_ctl_context();
1444 pa_assert(PA_SINK_IS_LINKED(s->state));
1445
1446 /* This is called when the hardware's real volume changes due to
1447 * some external event. We copy the real volume into our
1448 * reference volume and then rebuild the stream volumes based on
1449 * i->real_ratio which should stay fixed. */
1450
1451 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1452 return;
1453
1454 old_reference_volume = s->reference_volume;
1455
1456 /* 1. Make the real volume the reference volume */
1457 s->reference_volume = s->real_volume;
1458
1459 if (s->flags & PA_SINK_FLAT_VOLUME) {
1460
1461 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1462 pa_cvolume old_volume, remapped;
1463
1464 old_volume = i->volume;
1465
1466 /* 2. Since the sink's reference and real volumes are equal
1467 * now our ratios should be too. */
1468 i->reference_ratio = i->real_ratio;
1469
1470 /* 3. Recalculate the new stream reference volume based on the
1471 * reference ratio and the sink's reference volume.
1472 *
1473 * This basically calculates:
1474 *
1475 * i->volume = s->reference_volume * i->reference_ratio
1476 *
1477 * This is identical to propagate_reference_volume() */
1478 remapped = s->reference_volume;
1479 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1480 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1481
1482 /* Notify if something changed */
1483 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1484
1485 if (i->volume_changed)
1486 i->volume_changed(i);
1487
1488 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1489 }
1490 }
1491 }
1492
1493 /* Something got changed in the hardware. It probably makes sense
1494 * to save changed hw settings given that hw volume changes not
1495 * triggered by PA are almost certainly done by the user. */
1496 s->save_volume = TRUE;
1497
1498 if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
1499 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1500 }
1501
1502 /* Called from main thread */
1503 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1504 pa_sink_assert_ref(s);
1505 pa_assert_ctl_context();
1506 pa_assert(PA_SINK_IS_LINKED(s->state));
1507
1508 if (s->refresh_volume || force_refresh) {
1509 pa_cvolume old_real_volume;
1510
1511 old_real_volume = s->real_volume;
1512
1513 if (s->get_volume)
1514 s->get_volume(s);
1515
1516 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1517
1518 propagate_real_volume(s, &old_real_volume);
1519 }
1520
1521 return &s->reference_volume;
1522 }
1523
1524 /* Called from main thread */
1525 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1526 pa_cvolume old_real_volume;
1527
1528 pa_sink_assert_ref(s);
1529 pa_assert_ctl_context();
1530 pa_assert(PA_SINK_IS_LINKED(s->state));
1531
1532 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1533
1534 old_real_volume = s->real_volume;
1535 s->real_volume = *new_real_volume;
1536
1537 propagate_real_volume(s, &old_real_volume);
1538 }
1539
1540 /* Called from main thread */
1541 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1542 pa_bool_t old_muted;
1543
1544 pa_sink_assert_ref(s);
1545 pa_assert_ctl_context();
1546 pa_assert(PA_SINK_IS_LINKED(s->state));
1547
1548 old_muted = s->muted;
1549 s->muted = mute;
1550 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1551
1552 if (s->set_mute)
1553 s->set_mute(s);
1554
1555 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1556
1557 if (old_muted != s->muted)
1558 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1559 }
1560
1561 /* Called from main thread */
1562 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1563
1564 pa_sink_assert_ref(s);
1565 pa_assert_ctl_context();
1566 pa_assert(PA_SINK_IS_LINKED(s->state));
1567
1568 if (s->refresh_muted || force_refresh) {
1569 pa_bool_t old_muted = s->muted;
1570
1571 if (s->get_mute)
1572 s->get_mute(s);
1573
1574 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1575
1576 if (old_muted != s->muted) {
1577 s->save_muted = TRUE;
1578
1579 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1580
1581 /* Make sure the soft mute status stays in sync */
1582 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1583 }
1584 }
1585
1586 return s->muted;
1587 }
1588
1589 /* Called from main thread */
1590 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1591 pa_sink_assert_ref(s);
1592 pa_assert_ctl_context();
1593 pa_assert(PA_SINK_IS_LINKED(s->state));
1594
1595 /* The sink implementor may call this if the mute status changed to make sure everyone is notified */
1596
1597 if (s->muted == new_muted)
1598 return;
1599
1600 s->muted = new_muted;
1601 s->save_muted = TRUE;
1602
1603 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1604 }
1605
1606 /* Called from main thread */
1607 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1608 pa_sink_assert_ref(s);
1609 pa_assert_ctl_context();
1610
1611 if (p)
1612 pa_proplist_update(s->proplist, mode, p);
1613
1614 if (PA_SINK_IS_LINKED(s->state)) {
1615 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1616 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1617 }
1618
1619 return TRUE;
1620 }
1621
1622 /* Called from main thread */
1623 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1624 void pa_sink_set_description(pa_sink *s, const char *description) {
1625 const char *old;
1626 pa_sink_assert_ref(s);
1627 pa_assert_ctl_context();
1628
1629 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1630 return;
1631
1632 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1633
1634 if (old && description && pa_streq(old, description))
1635 return;
1636
1637 if (description)
1638 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1639 else
1640 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1641
1642 if (s->monitor_source) {
1643 char *n;
1644
1645 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1646 pa_source_set_description(s->monitor_source, n);
1647 pa_xfree(n);
1648 }
1649
1650 if (PA_SINK_IS_LINKED(s->state)) {
1651 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1652 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1653 }
1654 }
1655
1656 /* Called from main thread */
1657 unsigned pa_sink_linked_by(pa_sink *s) {
1658 unsigned ret;
1659
1660 pa_sink_assert_ref(s);
1661 pa_assert_ctl_context();
1662 pa_assert(PA_SINK_IS_LINKED(s->state));
1663
1664 ret = pa_idxset_size(s->inputs);
1665
1666 /* We add in the number of streams connected to us here. Please
667 * note the asymmetry to pa_sink_used_by()! */
1668
1669 if (s->monitor_source)
1670 ret += pa_source_linked_by(s->monitor_source);
1671
1672 return ret;
1673 }
1674
1675 /* Called from main thread */
1676 unsigned pa_sink_used_by(pa_sink *s) {
1677 unsigned ret;
1678
1679 pa_sink_assert_ref(s);
1680 pa_assert_ctl_context();
1681 pa_assert(PA_SINK_IS_LINKED(s->state));
1682
1683 ret = pa_idxset_size(s->inputs);
1684 pa_assert(ret >= s->n_corked);
1685
1686 /* Streams connected to our monitor source do not matter for
687 * pa_sink_used_by()! */
1688
1689 return ret - s->n_corked;
1690 }
1691
1692 /* Called from main thread */
1693 unsigned pa_sink_check_suspend(pa_sink *s) {
1694 unsigned ret;
1695 pa_sink_input *i;
1696 uint32_t idx;
1697
1698 pa_sink_assert_ref(s);
1699 pa_assert_ctl_context();
1700
1701 if (!PA_SINK_IS_LINKED(s->state))
1702 return 0;
1703
1704 ret = 0;
1705
1706 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1707 pa_sink_input_state_t st;
1708
1709 st = pa_sink_input_get_state(i);
1710 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1711
1712 if (st == PA_SINK_INPUT_CORKED)
1713 continue;
1714
1715 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1716 continue;
1717
1718 ret ++;
1719 }
1720
1721 if (s->monitor_source)
1722 ret += pa_source_check_suspend(s->monitor_source);
1723
1724 return ret;
1725 }
1726
1727 /* Called from the IO thread */
1728 static void sync_input_volumes_within_thread(pa_sink *s) {
1729 pa_sink_input *i;
1730 void *state = NULL;
1731
1732 pa_sink_assert_ref(s);
1733 pa_sink_assert_io_context(s);
1734
1735 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1736 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1737 continue;
1738
1739 i->thread_info.soft_volume = i->soft_volume;
1740 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1741 }
1742 }
1743
1744 /* Called from IO thread, except when it is not */
1745 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1746 pa_sink *s = PA_SINK(o);
1747 pa_sink_assert_ref(s);
1748
1749 switch ((pa_sink_message_t) code) {
1750
1751 case PA_SINK_MESSAGE_ADD_INPUT: {
1752 pa_sink_input *i = PA_SINK_INPUT(userdata);
1753
1754 /* If you change anything here, make sure to change the
1755 * sink input handling a few lines down at
1756 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1757
1758 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1759
1760 /* Since the caller sleeps in pa_sink_input_put(), we can
1761 * safely access data outside of thread_info even though
1762 * it is mutable */
1763
1764 if ((i->thread_info.sync_prev = i->sync_prev)) {
1765 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1766 pa_assert(i->sync_prev->sync_next == i);
1767 i->thread_info.sync_prev->thread_info.sync_next = i;
1768 }
1769
1770 if ((i->thread_info.sync_next = i->sync_next)) {
1771 pa_assert(i->sink == i->thread_info.sync_next->sink);
1772 pa_assert(i->sync_next->sync_prev == i);
1773 i->thread_info.sync_next->thread_info.sync_prev = i;
1774 }
1775
1776 pa_assert(!i->thread_info.attached);
1777 i->thread_info.attached = TRUE;
1778
1779 if (i->attach)
1780 i->attach(i);
1781
1782 pa_sink_input_set_state_within_thread(i, i->state);
1783
1784 /* The requested latency of the sink input needs to be
1785 * fixed up and then configured on the sink */
1786
1787 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1788 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1789
1790 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1791 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1792
1793 /* We don't rewind here automatically. This is left to the
1794 * sink input implementor because some sink inputs need a
1795 * slow start, i.e. need some time to buffer client
1796 * samples before beginning streaming. */
1797
1798 /* In flat volume mode we need to update the volume as
1799 * well */
1800 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1801 }
1802
1803 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1804 pa_sink_input *i = PA_SINK_INPUT(userdata);
1805
1806 /* If you change anything here, make sure to change the
1807 * sink input handling a few lines down at
1808 * PA_SINK_MESSAGE_START_MOVE, too. */
1809
1810 if (i->detach)
1811 i->detach(i);
1812
1813 pa_sink_input_set_state_within_thread(i, i->state);
1814
1815 pa_assert(i->thread_info.attached);
1816 i->thread_info.attached = FALSE;
1817
1818 /* Since the caller sleeps in pa_sink_input_unlink(),
1819 * we can safely access data outside of thread_info even
1820 * though it is mutable */
1821
1822 pa_assert(!i->sync_prev);
1823 pa_assert(!i->sync_next);
1824
1825 if (i->thread_info.sync_prev) {
1826 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1827 i->thread_info.sync_prev = NULL;
1828 }
1829
1830 if (i->thread_info.sync_next) {
1831 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1832 i->thread_info.sync_next = NULL;
1833 }
1834
1835 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1836 pa_sink_input_unref(i);
1837
1838 pa_sink_invalidate_requested_latency(s, TRUE);
1839 pa_sink_request_rewind(s, (size_t) -1);
1840
1841 /* In flat volume mode we need to update the volume as
1842 * well */
1843 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1844 }
1845
1846 case PA_SINK_MESSAGE_START_MOVE: {
1847 pa_sink_input *i = PA_SINK_INPUT(userdata);
1848
1849 /* We don't support moving synchronized streams. */
1850 pa_assert(!i->sync_prev);
1851 pa_assert(!i->sync_next);
1852 pa_assert(!i->thread_info.sync_next);
1853 pa_assert(!i->thread_info.sync_prev);
1854
1855 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1856 pa_usec_t usec = 0;
1857 size_t sink_nbytes, total_nbytes;
1858
1859 /* Get the latency of the sink */
1860 usec = pa_sink_get_latency_within_thread(s);
1861 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1862 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1863
1864 if (total_nbytes > 0) {
1865 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1866 i->thread_info.rewrite_flush = TRUE;
1867 pa_sink_input_process_rewind(i, sink_nbytes);
1868 }
1869 }
1870
1871 if (i->detach)
1872 i->detach(i);
1873
1874 pa_assert(i->thread_info.attached);
1875 i->thread_info.attached = FALSE;
1876
1877 /* Let's remove the sink input ...*/
1878 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1879 pa_sink_input_unref(i);
1880
1881 pa_sink_invalidate_requested_latency(s, TRUE);
1882
1883 pa_log_debug("Requesting rewind due to started move");
1884 pa_sink_request_rewind(s, (size_t) -1);
1885
1886 /* In flat volume mode we need to update the volume as
1887 * well */
1888 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1889 }
1890
1891 case PA_SINK_MESSAGE_FINISH_MOVE: {
1892 pa_sink_input *i = PA_SINK_INPUT(userdata);
1893
1894 /* We don't support moving synchronized streams. */
1895 pa_assert(!i->sync_prev);
1896 pa_assert(!i->sync_next);
1897 pa_assert(!i->thread_info.sync_next);
1898 pa_assert(!i->thread_info.sync_prev);
1899
1900 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1901
1902 pa_assert(!i->thread_info.attached);
1903 i->thread_info.attached = TRUE;
1904
1905 if (i->attach)
1906 i->attach(i);
1907
1908 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1909 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1910
1911 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1912 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1913
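                  /* If the stream is playing, skip over as much of it as the
                   * new sink already has queued up and request a rewind of the
                   * same size, so that playback resumes roughly where it
                   * stopped on the old sink. */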
1914 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1915 pa_usec_t usec = 0;
1916 size_t nbytes;
1917
1918 /* Get the latency of the sink */
1919 usec = pa_sink_get_latency_within_thread(s);
1920 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1921
1922 if (nbytes > 0)
1923 pa_sink_input_drop(i, nbytes);
1924
1925 pa_log_debug("Requesting rewind due to finished move");
1926 pa_sink_request_rewind(s, nbytes);
1927 }
1928
1929 /* In flat volume mode we need to update the volume as
1930 * well */
1931 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1932 }
1933
1934 case PA_SINK_MESSAGE_SET_VOLUME:
1935
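                  /* Copy the new soft volume into the thread context and
                   * rewind, so that audio that has already been rendered is
                   * mixed again with the new volume applied. */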
1936 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1937 s->thread_info.soft_volume = s->soft_volume;
1938 pa_sink_request_rewind(s, (size_t) -1);
1939 }
1940
1941 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1942 return 0;
1943
1944 /* Fall through ... */
1945
1946 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1947 sync_input_volumes_within_thread(s);
1948 return 0;
1949
1950 case PA_SINK_MESSAGE_GET_VOLUME:
1951 return 0;
1952
1953 case PA_SINK_MESSAGE_SET_MUTE:
1954
1955 if (s->thread_info.soft_muted != s->muted) {
1956 s->thread_info.soft_muted = s->muted;
1957 pa_sink_request_rewind(s, (size_t) -1);
1958 }
1959
1960 return 0;
1961
1962 case PA_SINK_MESSAGE_GET_MUTE:
1963 return 0;
1964
1965 case PA_SINK_MESSAGE_SET_STATE: {
1966
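                  /* TRUE iff this state change moves us from SUSPENDED into an
                   * opened state or from an opened state into SUSPENDED; only
                   * then do the inputs' suspend callbacks need to run. */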
1967 pa_bool_t suspend_change =
1968 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1969 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1970
1971 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1972
1973 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1974 s->thread_info.rewind_nbytes = 0;
1975 s->thread_info.rewind_requested = FALSE;
1976 }
1977
1978 if (suspend_change) {
1979 pa_sink_input *i;
1980 void *state = NULL;
1981
1982 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1983 if (i->suspend_within_thread)
1984 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1985 }
1986
1987 return 0;
1988 }
1989
1990 case PA_SINK_MESSAGE_DETACH:
1991
1992 /* Detach all streams */
1993 pa_sink_detach_within_thread(s);
1994 return 0;
1995
1996 case PA_SINK_MESSAGE_ATTACH:
1997
1998 /* Reattach all streams */
1999 pa_sink_attach_within_thread(s);
2000 return 0;
2001
2002 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2003
2004 pa_usec_t *usec = userdata;
2005 *usec = pa_sink_get_requested_latency_within_thread(s);
2006
2007                  /* Yes, that's right: the IO thread will see -1 when no
2008                   * explicit requested latency is configured, while the
2009                   * main thread will see max_latency */
2010 if (*usec == (pa_usec_t) -1)
2011 *usec = s->thread_info.max_latency;
2012
2013 return 0;
2014 }
2015
2016 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2017 pa_usec_t *r = userdata;
2018
2019 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2020
2021 return 0;
2022 }
2023
2024 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2025 pa_usec_t *r = userdata;
2026
2027 r[0] = s->thread_info.min_latency;
2028 r[1] = s->thread_info.max_latency;
2029
2030 return 0;
2031 }
2032
2033 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2034
2035 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2036 return 0;
2037
2038 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2039
2040 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2041 return 0;
2042
2043 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2044
2045 *((size_t*) userdata) = s->thread_info.max_rewind;
2046 return 0;
2047
2048 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2049
2050 *((size_t*) userdata) = s->thread_info.max_request;
2051 return 0;
2052
2053 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2054
2055 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2056 return 0;
2057
2058 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2059
2060 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2061 return 0;
2062
2063 case PA_SINK_MESSAGE_GET_LATENCY:
2064 case PA_SINK_MESSAGE_MAX:
2065 ;
2066 }
2067
2068 return -1;
2069 }
2070
2071 /* Called from main thread */
2072 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2073 pa_sink *sink;
2074 uint32_t idx;
2075 int ret = 0;
2076
2077 pa_core_assert_ref(c);
2078 pa_assert_ctl_context();
2079 pa_assert(cause != 0);
2080
2081 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2082 int r;
2083
2084 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2085 ret = r;
2086 }
2087
2088 return ret;
2089 }
2090
2091 /* Called from main thread */
2092 void pa_sink_detach(pa_sink *s) {
2093 pa_sink_assert_ref(s);
2094 pa_assert_ctl_context();
2095 pa_assert(PA_SINK_IS_LINKED(s->state));
2096
2097 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2098 }
2099
2100 /* Called from main thread */
2101 void pa_sink_attach(pa_sink *s) {
2102 pa_sink_assert_ref(s);
2103 pa_assert_ctl_context();
2104 pa_assert(PA_SINK_IS_LINKED(s->state));
2105
2106 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2107 }
2108
2109 /* Called from IO thread */
2110 void pa_sink_detach_within_thread(pa_sink *s) {
2111 pa_sink_input *i;
2112 void *state = NULL;
2113
2114 pa_sink_assert_ref(s);
2115 pa_sink_assert_io_context(s);
2116 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2117
2118 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2119 if (i->detach)
2120 i->detach(i);
2121
2122 if (s->monitor_source)
2123 pa_source_detach_within_thread(s->monitor_source);
2124 }
2125
2126 /* Called from IO thread */
2127 void pa_sink_attach_within_thread(pa_sink *s) {
2128 pa_sink_input *i;
2129 void *state = NULL;
2130
2131 pa_sink_assert_ref(s);
2132 pa_sink_assert_io_context(s);
2133 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2134
2135 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2136 if (i->attach)
2137 i->attach(i);
2138
2139 if (s->monitor_source)
2140 pa_source_attach_within_thread(s->monitor_source);
2141 }
2142
2143 /* Called from IO thread */
2144 void pa_sink_request_rewind(pa_sink *s, size_t nbytes) {
2145 pa_sink_assert_ref(s);
2146 pa_sink_assert_io_context(s);
2147 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2148
2149 if (s->thread_info.state == PA_SINK_SUSPENDED)
2150 return;
2151
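      /* A request of (size_t) -1 means "rewind as much as possible". The
       * request is clamped to max_rewind, and a request that does not ask
       * for more than one already pending is ignored. */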
2152 if (nbytes == (size_t) -1)
2153 nbytes = s->thread_info.max_rewind;
2154
2155 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2156
2157 if (s->thread_info.rewind_requested &&
2158 nbytes <= s->thread_info.rewind_nbytes)
2159 return;
2160
2161 s->thread_info.rewind_nbytes = nbytes;
2162 s->thread_info.rewind_requested = TRUE;
2163
2164 if (s->request_rewind)
2165 s->request_rewind(s);
2166 }
2167
2168 /* Called from IO thread */
2169 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2170 pa_usec_t result = (pa_usec_t) -1;
2171 pa_sink_input *i;
2172 void *state = NULL;
2173 pa_usec_t monitor_latency;
2174
2175 pa_sink_assert_ref(s);
2176 pa_sink_assert_io_context(s);
2177
2178 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2179 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2180
2181 if (s->thread_info.requested_latency_valid)
2182 return s->thread_info.requested_latency;
2183
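      /* Nothing cached: the effective requested latency is the smallest
       * latency requested by any input or by the monitor source, clamped to
       * the sink's latency range; -1 means nobody requested anything. */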
2184 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2185 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2186 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2187 result = i->thread_info.requested_sink_latency;
2188
2189 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2190
2191 if (monitor_latency != (pa_usec_t) -1 &&
2192 (result == (pa_usec_t) -1 || result > monitor_latency))
2193 result = monitor_latency;
2194
2195 if (result != (pa_usec_t) -1)
2196 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2197
2198 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2199 /* Only cache if properly initialized */
2200 s->thread_info.requested_latency = result;
2201 s->thread_info.requested_latency_valid = TRUE;
2202 }
2203
2204 return result;
2205 }
2206
2207 /* Called from main thread */
2208 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2209 pa_usec_t usec = 0;
2210
2211 pa_sink_assert_ref(s);
2212 pa_assert_ctl_context();
2213 pa_assert(PA_SINK_IS_LINKED(s->state));
2214
2215 if (s->state == PA_SINK_SUSPENDED)
2216 return 0;
2217
2218 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2219 return usec;
2220 }
2221
2222 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2223 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2224 pa_sink_input *i;
2225 void *state = NULL;
2226
2227 pa_sink_assert_ref(s);
2228 pa_sink_assert_io_context(s);
2229
2230 if (max_rewind == s->thread_info.max_rewind)
2231 return;
2232
2233 s->thread_info.max_rewind = max_rewind;
2234
2235 if (PA_SINK_IS_LINKED(s->thread_info.state))
2236 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2237 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2238
2239 if (s->monitor_source)
2240 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2241 }
2242
2243 /* Called from main thread */
2244 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2245 pa_sink_assert_ref(s);
2246 pa_assert_ctl_context();
2247
2248 if (PA_SINK_IS_LINKED(s->state))
2249 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2250 else
2251 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2252 }
2253
2254 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2255 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2256 void *state = NULL;
2257
2258 pa_sink_assert_ref(s);
2259 pa_sink_assert_io_context(s);
2260
2261 if (max_request == s->thread_info.max_request)
2262 return;
2263
2264 s->thread_info.max_request = max_request;
2265
2266 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2267 pa_sink_input *i;
2268
2269 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2270 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2271 }
2272 }
2273
2274 /* Called from main thread */
2275 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2276 pa_sink_assert_ref(s);
2277 pa_assert_ctl_context();
2278
2279 if (PA_SINK_IS_LINKED(s->state))
2280 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2281 else
2282 pa_sink_set_max_request_within_thread(s, max_request);
2283 }
2284
2285 /* Called from IO thread */
2286 void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
2287 pa_sink_input *i;
2288 void *state = NULL;
2289
2290 pa_sink_assert_ref(s);
2291 pa_sink_assert_io_context(s);
2292
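      /* Only sinks with dynamic latency cache a requested latency that needs
       * invalidating. For a fixed-latency sink an invalidation caused purely
       * by a dynamic latency change (dynamic == TRUE) is irrelevant, so we
       * return early; other causes still notify the handlers below. */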
2293 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
2294 s->thread_info.requested_latency_valid = FALSE;
2295 else if (dynamic)
2296 return;
2297
2298 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2299
2300 if (s->update_requested_latency)
2301 s->update_requested_latency(s);
2302
2303 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2304 if (i->update_sink_requested_latency)
2305 i->update_sink_requested_latency(i);
2306 }
2307 }
2308
2309 /* Called from main thread */
2310 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2311 pa_sink_assert_ref(s);
2312 pa_assert_ctl_context();
2313
2314 /* min_latency == 0: no limit
2315 * min_latency anything else: specified limit
2316 *
2317 * Similar for max_latency */
2318
2319 if (min_latency < ABSOLUTE_MIN_LATENCY)
2320 min_latency = ABSOLUTE_MIN_LATENCY;
2321
2322 if (max_latency <= 0 ||
2323 max_latency > ABSOLUTE_MAX_LATENCY)
2324 max_latency = ABSOLUTE_MAX_LATENCY;
2325
2326 pa_assert(min_latency <= max_latency);
2327
2328 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2329 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2330 max_latency == ABSOLUTE_MAX_LATENCY) ||
2331 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2332
2333 if (PA_SINK_IS_LINKED(s->state)) {
2334 pa_usec_t r[2];
2335
2336 r[0] = min_latency;
2337 r[1] = max_latency;
2338
2339 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2340 } else
2341 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2342 }
2343
2344 /* Called from main thread */
2345 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2346 pa_sink_assert_ref(s);
2347 pa_assert_ctl_context();
2348 pa_assert(min_latency);
2349 pa_assert(max_latency);
2350
2351 if (PA_SINK_IS_LINKED(s->state)) {
2352 pa_usec_t r[2] = { 0, 0 };
2353
2354 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2355
2356 *min_latency = r[0];
2357 *max_latency = r[1];
2358 } else {
2359 *min_latency = s->thread_info.min_latency;
2360 *max_latency = s->thread_info.max_latency;
2361 }
2362 }
2363
2364 /* Called from IO thread */
2365 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2366 pa_sink_assert_ref(s);
2367 pa_sink_assert_io_context(s);
2368
2369 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2370 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2371 pa_assert(min_latency <= max_latency);
2372
2373 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2374 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2375 max_latency == ABSOLUTE_MAX_LATENCY) ||
2376 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2377
2378 if (s->thread_info.min_latency == min_latency &&
2379 s->thread_info.max_latency == max_latency)
2380 return;
2381
2382 s->thread_info.min_latency = min_latency;
2383 s->thread_info.max_latency = max_latency;
2384
2385 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2386 pa_sink_input *i;
2387 void *state = NULL;
2388
2389 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2390 if (i->update_sink_latency_range)
2391 i->update_sink_latency_range(i);
2392 }
2393
2394 pa_sink_invalidate_requested_latency(s, FALSE);
2395
2396 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2397 }
2398
2399 /* Called from main thread */
2400 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2401 pa_sink_assert_ref(s);
2402 pa_assert_ctl_context();
2403
2404 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2405 pa_assert(latency == 0);
2406 return;
2407 }
2408
2409 if (latency < ABSOLUTE_MIN_LATENCY)
2410 latency = ABSOLUTE_MIN_LATENCY;
2411
2412 if (latency > ABSOLUTE_MAX_LATENCY)
2413 latency = ABSOLUTE_MAX_LATENCY;
2414
2415 if (PA_SINK_IS_LINKED(s->state))
2416 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2417 else
2418 s->thread_info.fixed_latency = latency;
2419
2420 pa_source_set_fixed_latency(s->monitor_source, latency);
2421 }
2422
2423 /* Called from main thread */
2424 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2425 pa_usec_t latency;
2426
2427 pa_sink_assert_ref(s);
2428 pa_assert_ctl_context();
2429
2430 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2431 return 0;
2432
2433 if (PA_SINK_IS_LINKED(s->state))
2434 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2435 else
2436 latency = s->thread_info.fixed_latency;
2437
2438 return latency;
2439 }
2440
2441 /* Called from IO thread */
2442 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
2443 pa_sink_assert_ref(s);
2444 pa_sink_assert_io_context(s);
2445
2446 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2447 pa_assert(latency == 0);
2448 return;
2449 }
2450
2451 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2452 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2453
2454 if (s->thread_info.fixed_latency == latency)
2455 return;
2456
2457 s->thread_info.fixed_latency = latency;
2458
2459 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2460 pa_sink_input *i;
2461 void *state = NULL;
2462
2463 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2464 if (i->update_sink_fixed_latency)
2465 i->update_sink_fixed_latency(i);
2466 }
2467
2468 pa_sink_invalidate_requested_latency(s, FALSE);
2469
2470 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
2471 }
2472
2473 /* Called from main context */
2474 size_t pa_sink_get_max_rewind(pa_sink *s) {
2475 size_t r;
2476 pa_sink_assert_ref(s);
2477 pa_assert_ctl_context();
2478
2479 if (!PA_SINK_IS_LINKED(s->state))
2480 return s->thread_info.max_rewind;
2481
2482 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2483
2484 return r;
2485 }
2486
2487 /* Called from main context */
2488 size_t pa_sink_get_max_request(pa_sink *s) {
2489 size_t r;
2490 pa_sink_assert_ref(s);
2491 pa_assert_ctl_context();
2492
2493 if (!PA_SINK_IS_LINKED(s->state))
2494 return s->thread_info.max_request;
2495
2496 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2497
2498 return r;
2499 }
2500
2501 /* Called from main context */
2502 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2503 pa_device_port *port;
2504
2505 pa_sink_assert_ref(s);
2506 pa_assert_ctl_context();
2507
2508 if (!s->set_port) {
2509 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2510 return -PA_ERR_NOTIMPLEMENTED;
2511 }
2512
2513 if (!s->ports)
2514 return -PA_ERR_NOENTITY;
2515
2516 if (!(port = pa_hashmap_get(s->ports, name)))
2517 return -PA_ERR_NOENTITY;
2518
2519 if (s->active_port == port) {
2520 s->save_port = s->save_port || save;
2521 return 0;
2522 }
2523
2524 if ((s->set_port(s, port)) < 0)
2525 return -PA_ERR_NOENTITY;
2526
2527 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2528
2529 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2530
2531 s->active_port = port;
2532 s->save_port = save;
2533
2534 return 0;
2535 }
2536
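 /* Fill in PA_PROP_DEVICE_ICON_NAME from the form factor, device class,
  * profile and bus properties, unless an icon name is already set.
  * Always returns TRUE. */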
2537 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2538 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2539
2540 pa_assert(p);
2541
2542 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2543 return TRUE;
2544
2545 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2546
2547 if (pa_streq(ff, "microphone"))
2548 t = "audio-input-microphone";
2549 else if (pa_streq(ff, "webcam"))
2550 t = "camera-web";
2551 else if (pa_streq(ff, "computer"))
2552 t = "computer";
2553 else if (pa_streq(ff, "handset"))
2554 t = "phone";
2555 else if (pa_streq(ff, "portable"))
2556 t = "multimedia-player";
2557 else if (pa_streq(ff, "tv"))
2558 t = "video-display";
2559
2560 /*
2561                   * The following icons are not part of the icon naming spec;
2562                   * see the discussion at:
2563 *
2564 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2565 */
2566 else if (pa_streq(ff, "headset"))
2567 t = "audio-headset";
2568 else if (pa_streq(ff, "headphone"))
2569 t = "audio-headphones";
2570 else if (pa_streq(ff, "speaker"))
2571 t = "audio-speakers";
2572 else if (pa_streq(ff, "hands-free"))
2573 t = "audio-handsfree";
2574 }
2575
2576 if (!t)
2577 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2578 if (pa_streq(c, "modem"))
2579 t = "modem";
2580
2581 if (!t) {
2582 if (is_sink)
2583 t = "audio-card";
2584 else
2585 t = "audio-input-microphone";
2586 }
2587
2588 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2589 if (strstr(profile, "analog"))
2590 s = "-analog";
2591 else if (strstr(profile, "iec958"))
2592 s = "-iec958";
2593 else if (strstr(profile, "hdmi"))
2594 s = "-hdmi";
2595 }
2596
2597 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2598
2599 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2600
2601 return TRUE;
2602 }
2603
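 /* Derive PA_PROP_DEVICE_DESCRIPTION from the form factor, device class or
  * product name, optionally suffixed with the profile description. Returns
  * FALSE if no usable description could be found. */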
2604 pa_bool_t pa_device_init_description(pa_proplist *p) {
2605 const char *s, *d = NULL, *k;
2606 pa_assert(p);
2607
2608 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2609 return TRUE;
2610
2611 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2612 if (pa_streq(s, "internal"))
2613 d = _("Internal Audio");
2614
2615 if (!d)
2616 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2617 if (pa_streq(s, "modem"))
2618 d = _("Modem");
2619
2620 if (!d)
2621 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2622
2623 if (!d)
2624 return FALSE;
2625
2626 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2627
2628 if (d && k)
2629 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2630 else if (d)
2631 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2632
2633 return TRUE;
2634 }
2635
2636 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2637 const char *s;
2638 pa_assert(p);
2639
2640 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2641 return TRUE;
2642
2643 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2644 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2645 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2646 return TRUE;
2647 }
2648
2649 return FALSE;
2650 }
2651
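 /* Compute a rough preference score for a device by summing bonuses for its
  * device class, form factor, bus and profile; higher values mark the device
  * as preferable, presumably when picking defaults. */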
2652 unsigned pa_device_init_priority(pa_proplist *p) {
2653 const char *s;
2654 unsigned priority = 0;
2655
2656 pa_assert(p);
2657
2658 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
2659
2660 if (pa_streq(s, "sound"))
2661 priority += 9000;
2662 else if (!pa_streq(s, "modem"))
2663 priority += 1000;
2664 }
2665
2666 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2667
2668 if (pa_streq(s, "internal"))
2669 priority += 900;
2670 else if (pa_streq(s, "speaker"))
2671 priority += 500;
2672 else if (pa_streq(s, "headphone"))
2673 priority += 400;
2674 }
2675
2676 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
2677
2678 if (pa_streq(s, "pci"))
2679 priority += 50;
2680 else if (pa_streq(s, "usb"))
2681 priority += 40;
2682 else if (pa_streq(s, "bluetooth"))
2683 priority += 30;
2684 }
2685
2686 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2687
2688 if (pa_startswith(s, "analog-"))
2689 priority += 9;
2690 else if (pa_startswith(s, "iec958-"))
2691 priority += 8;
2692 }
2693
2694 return priority;
2695 }