pulseaudio / src / pulsecore / sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
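/* Note (added summary): the three latency limits above are expressed in
 * microseconds (pa_usec_t): ABSOLUTE_MIN_LATENCY is 500 us,
 * ABSOLUTE_MAX_LATENCY is 10 s and DEFAULT_FIXED_LATENCY is 250 ms. */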
54
55 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 pa_zero(*data);
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
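/*
 * An illustrative sketch (not taken from this file) of the rough sequence a
 * sink implementor follows with the helpers above. The names ss, m, rtpoll
 * and thread_mq are hypothetical placeholders for the module's own objects.
 *
 *     pa_sink_new_data data;
 *     pa_sink *sink;
 *
 *     pa_sink_new_data_init(&data);
 *     pa_sink_new_data_set_name(&data, "example");
 *     pa_sink_new_data_set_sample_spec(&data, &ss);
 *     data.driver = __FILE__;
 *     data.module = m;
 *
 *     sink = pa_sink_new(m->core, &data, PA_SINK_LATENCY);
 *     pa_sink_new_data_done(&data);      -- always call this, even on failure
 *
 *     if (sink) {
 *         pa_sink_set_asyncmsgq(sink, thread_mq.inq);
 *         pa_sink_set_rtpoll(sink, rtpoll);
 *         pa_sink_put(sink);             -- requires asyncmsgq to be set
 *     }
 */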
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
149
150 /* Called from main context */
151 static void reset_callbacks(pa_sink *s) {
152 pa_assert(s);
153
154 s->set_state = NULL;
155 s->get_volume = NULL;
156 s->set_volume = NULL;
157 s->get_mute = NULL;
158 s->set_mute = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
161 s->set_port = NULL;
162 }
163
164 /* Called from main context */
165 pa_sink* pa_sink_new(
166 pa_core *core,
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
169
170 pa_sink *s;
171 const char *name;
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
174 const char *dn;
175 char *pt;
176
177 pa_assert(core);
178 pa_assert(data);
179 pa_assert(data->name);
180 pa_assert_ctl_context();
181
182 s = pa_msgobject_new(pa_sink);
183
184 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
185 pa_log_debug("Failed to register name %s.", data->name);
186 pa_xfree(s);
187 return NULL;
188 }
189
190 pa_sink_new_data_set_name(data, name);
191
192 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
193 pa_xfree(s);
194 pa_namereg_unregister(core, name);
195 return NULL;
196 }
197
198 /* FIXME, need to free s here on failure */
199
200 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
201 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
202
203 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
204
205 if (!data->channel_map_is_set)
206 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
207
208 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
209 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
210
211 if (!data->volume_is_set)
212 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
213
214 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
215 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
216
217 if (!data->muted_is_set)
218 data->muted = FALSE;
219
220 if (data->card)
221 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
222
223 pa_device_init_description(data->proplist);
224 pa_device_init_icon(data->proplist, TRUE);
225 pa_device_init_intended_roles(data->proplist);
226
227 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
228 pa_xfree(s);
229 pa_namereg_unregister(core, name);
230 return NULL;
231 }
232
233 s->parent.parent.free = sink_free;
234 s->parent.process_msg = pa_sink_process_msg;
235
236 s->core = core;
237 s->state = PA_SINK_INIT;
238 s->flags = flags;
239 s->priority = 0;
240 s->suspend_cause = 0;
241 s->name = pa_xstrdup(name);
242 s->proplist = pa_proplist_copy(data->proplist);
243 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
244 s->module = data->module;
245 s->card = data->card;
246
247 s->priority = pa_device_init_priority(s->proplist);
248
249 s->sample_spec = data->sample_spec;
250 s->channel_map = data->channel_map;
251
252 s->inputs = pa_idxset_new(NULL, NULL);
253 s->n_corked = 0;
254
255 s->reference_volume = s->real_volume = data->volume;
256 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
257 s->base_volume = PA_VOLUME_NORM;
258 s->n_volume_steps = PA_VOLUME_NORM+1;
259 s->muted = data->muted;
260 s->refresh_volume = s->refresh_muted = FALSE;
261
262 reset_callbacks(s);
263 s->userdata = NULL;
264
265 s->asyncmsgq = NULL;
266
267 /* As a minor optimization we just steal the list instead of
268 * copying it here */
269 s->ports = data->ports;
270 data->ports = NULL;
271
272 s->active_port = NULL;
273 s->save_port = FALSE;
274
275 if (data->active_port && s->ports)
276 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
277 s->save_port = data->save_port;
278
279 if (!s->active_port && s->ports) {
280 void *state;
281 pa_device_port *p;
282
283 PA_HASHMAP_FOREACH(p, s->ports, state)
284 if (!s->active_port || p->priority > s->active_port->priority)
285 s->active_port = p;
286 }
287
288 s->save_volume = data->save_volume;
289 s->save_muted = data->save_muted;
290
291 pa_silence_memchunk_get(
292 &core->silence_cache,
293 core->mempool,
294 &s->silence,
295 &s->sample_spec,
296 0);
297
298 s->thread_info.rtpoll = NULL;
299 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
300 s->thread_info.soft_volume = s->soft_volume;
301 s->thread_info.soft_muted = s->muted;
302 s->thread_info.state = s->state;
303 s->thread_info.rewind_nbytes = 0;
304 s->thread_info.rewind_requested = FALSE;
305 s->thread_info.max_rewind = 0;
306 s->thread_info.max_request = 0;
307 s->thread_info.requested_latency_valid = FALSE;
308 s->thread_info.requested_latency = 0;
309 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
310 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
311 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
312
313 /* FIXME: This should probably be moved to pa_sink_put() */
314 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
315
316 if (s->card)
317 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
318
319 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
320 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
321 s->index,
322 s->name,
323 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
324 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
325 pt);
326 pa_xfree(pt);
327
328 pa_source_new_data_init(&source_data);
329 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
330 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
331 source_data.name = pa_sprintf_malloc("%s.monitor", name);
332 source_data.driver = data->driver;
333 source_data.module = data->module;
334 source_data.card = data->card;
335
336 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
337 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
338 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
339
340 s->monitor_source = pa_source_new(core, &source_data,
341 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
342 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
343
344 pa_source_new_data_done(&source_data);
345
346 if (!s->monitor_source) {
347 pa_sink_unlink(s);
348 pa_sink_unref(s);
349 return NULL;
350 }
351
352 s->monitor_source->monitor_of = s;
353
354 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
355 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
356 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
357
358 return s;
359 }
360
361 /* Called from main context */
362 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
363 int ret;
364 pa_bool_t suspend_change;
365 pa_sink_state_t original_state;
366
367 pa_assert(s);
368 pa_assert_ctl_context();
369
370 if (s->state == state)
371 return 0;
372
373 original_state = s->state;
374
375 suspend_change =
376 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
377 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
378
379 if (s->set_state)
380 if ((ret = s->set_state(s, state)) < 0)
381 return ret;
382
383 if (s->asyncmsgq)
384 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
385
386 if (s->set_state)
387 s->set_state(s, original_state);
388
389 return ret;
390 }
391
392 s->state = state;
393
394     if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
395 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
396 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
397 }
398
399 if (suspend_change) {
400 pa_sink_input *i;
401 uint32_t idx;
402
403 /* We're suspending or resuming, tell everyone about it */
404
405 PA_IDXSET_FOREACH(i, s->inputs, idx)
406 if (s->state == PA_SINK_SUSPENDED &&
407 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
408 pa_sink_input_kill(i);
409 else if (i->suspend)
410 i->suspend(i, state == PA_SINK_SUSPENDED);
411
412 if (s->monitor_source)
413 pa_source_sync_suspend(s->monitor_source);
414 }
415
416 return 0;
417 }
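/*
 * For orientation (a summary, not normative): pa_sink_put() performs the
 * PA_SINK_INIT -> PA_SINK_IDLE transition, pa_sink_update_status() toggles
 * between PA_SINK_IDLE and PA_SINK_RUNNING depending on pa_sink_used_by(),
 * pa_sink_suspend() moves the sink into and out of PA_SINK_SUSPENDED, and
 * pa_sink_unlink() finally puts it into PA_SINK_UNLINKED.
 */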
418
419 /* Called from main context */
420 void pa_sink_put(pa_sink* s) {
421 pa_sink_assert_ref(s);
422 pa_assert_ctl_context();
423
424 pa_assert(s->state == PA_SINK_INIT);
425
426 /* The following fields must be initialized properly when calling _put() */
427 pa_assert(s->asyncmsgq);
428 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
429
430 /* Generally, flags should be initialized via pa_sink_new(). As a
431 * special exception we allow volume related flags to be set
432 * between _new() and _put(). */
433
434 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
435 s->flags |= PA_SINK_DECIBEL_VOLUME;
436
437 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
438 s->flags |= PA_SINK_FLAT_VOLUME;
439
440 /* We assume that if the sink implementor changed the default
441 * volume he did so in real_volume, because that is the usual
442 * place where he is supposed to place his changes. */
443 s->reference_volume = s->real_volume;
444
445 s->thread_info.soft_volume = s->soft_volume;
446 s->thread_info.soft_muted = s->muted;
447
448 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
449 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
450 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
451 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
452 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
453
454 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
455 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
456 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
457
458 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
459
460 pa_source_put(s->monitor_source);
461
462 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
463 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
464 }
465
466 /* Called from main context */
467 void pa_sink_unlink(pa_sink* s) {
468 pa_bool_t linked;
469 pa_sink_input *i, *j = NULL;
470
471 pa_assert(s);
472 pa_assert_ctl_context();
473
474 /* Please note that pa_sink_unlink() does more than simply
475 * reversing pa_sink_put(). It also undoes the registrations
476 * already done in pa_sink_new()! */
477
478 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
479 * may be called multiple times on the same sink without bad
480 * effects. */
481
482 linked = PA_SINK_IS_LINKED(s->state);
483
484 if (linked)
485 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
486
487 if (s->state != PA_SINK_UNLINKED)
488 pa_namereg_unregister(s->core, s->name);
489 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
490
491 if (s->card)
492 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
493
494 while ((i = pa_idxset_first(s->inputs, NULL))) {
495 pa_assert(i != j);
496 pa_sink_input_kill(i);
497 j = i;
498 }
499
500 if (linked)
501 sink_set_state(s, PA_SINK_UNLINKED);
502 else
503 s->state = PA_SINK_UNLINKED;
504
505 reset_callbacks(s);
506
507 if (s->monitor_source)
508 pa_source_unlink(s->monitor_source);
509
510 if (linked) {
511 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
512 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
513 }
514 }
515
516 /* Called from main context */
517 static void sink_free(pa_object *o) {
518 pa_sink *s = PA_SINK(o);
519 pa_sink_input *i;
520
521 pa_assert(s);
522 pa_assert_ctl_context();
523 pa_assert(pa_sink_refcnt(s) == 0);
524
525 if (PA_SINK_IS_LINKED(s->state))
526 pa_sink_unlink(s);
527
528 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
529
530 if (s->monitor_source) {
531 pa_source_unref(s->monitor_source);
532 s->monitor_source = NULL;
533 }
534
535 pa_idxset_free(s->inputs, NULL, NULL);
536
537 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
538 pa_sink_input_unref(i);
539
540 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
541
542 if (s->silence.memblock)
543 pa_memblock_unref(s->silence.memblock);
544
545 pa_xfree(s->name);
546 pa_xfree(s->driver);
547
548 if (s->proplist)
549 pa_proplist_free(s->proplist);
550
551 if (s->ports) {
552 pa_device_port *p;
553
554 while ((p = pa_hashmap_steal_first(s->ports)))
555 pa_device_port_free(p);
556
557 pa_hashmap_free(s->ports, NULL, NULL);
558 }
559
560 pa_xfree(s);
561 }
562
563 /* Called from main context, and not while the IO thread is active, please */
564 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
565 pa_sink_assert_ref(s);
566 pa_assert_ctl_context();
567
568 s->asyncmsgq = q;
569
570 if (s->monitor_source)
571 pa_source_set_asyncmsgq(s->monitor_source, q);
572 }
573
574 /* Called from main context, and not while the IO thread is active, please */
575 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
576 pa_sink_assert_ref(s);
577 pa_assert_ctl_context();
578
579 if (mask == 0)
580 return;
581
582 /* For now, allow only a minimal set of flags to be changed. */
583 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
584
585 s->flags = (s->flags & ~mask) | (value & mask);
586
587 pa_source_update_flags(s->monitor_source,
588 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
589 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
590 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
591                            ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
592 }
593
594 /* Called from IO context, or before _put() from main context */
595 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
596 pa_sink_assert_ref(s);
597 pa_sink_assert_io_context(s);
598
599 s->thread_info.rtpoll = p;
600
601 if (s->monitor_source)
602 pa_source_set_rtpoll(s->monitor_source, p);
603 }
604
605 /* Called from main context */
606 int pa_sink_update_status(pa_sink*s) {
607 pa_sink_assert_ref(s);
608 pa_assert_ctl_context();
609 pa_assert(PA_SINK_IS_LINKED(s->state));
610
611 if (s->state == PA_SINK_SUSPENDED)
612 return 0;
613
614 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
615 }
616
617 /* Called from main context */
618 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
619 pa_sink_assert_ref(s);
620 pa_assert_ctl_context();
621 pa_assert(PA_SINK_IS_LINKED(s->state));
622 pa_assert(cause != 0);
623
624 if (suspend) {
625 s->suspend_cause |= cause;
626 s->monitor_source->suspend_cause |= cause;
627 } else {
628 s->suspend_cause &= ~cause;
629 s->monitor_source->suspend_cause &= ~cause;
630 }
631
632 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
633 return 0;
634
635 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
636
637 if (s->suspend_cause)
638 return sink_set_state(s, PA_SINK_SUSPENDED);
639 else
640 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
641 }
642
643 /* Called from main context */
644 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
645 pa_sink_input *i, *n;
646 uint32_t idx;
647
648 pa_sink_assert_ref(s);
649 pa_assert_ctl_context();
650 pa_assert(PA_SINK_IS_LINKED(s->state));
651
652 if (!q)
653 q = pa_queue_new();
654
655 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
656 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
657
658 pa_sink_input_ref(i);
659
660 if (pa_sink_input_start_move(i) >= 0)
661 pa_queue_push(q, i);
662 else
663 pa_sink_input_unref(i);
664 }
665
666 return q;
667 }
668
669 /* Called from main context */
670 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
671 pa_sink_input *i;
672
673 pa_sink_assert_ref(s);
674 pa_assert_ctl_context();
675 pa_assert(PA_SINK_IS_LINKED(s->state));
676 pa_assert(q);
677
678 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
679 if (pa_sink_input_finish_move(i, s, save) < 0)
680 pa_sink_input_fail_move(i);
681
682 pa_sink_input_unref(i);
683 }
684
685 pa_queue_free(q, NULL, NULL);
686 }
687
688 /* Called from main context */
689 void pa_sink_move_all_fail(pa_queue *q) {
690 pa_sink_input *i;
691
692 pa_assert_ctl_context();
693 pa_assert(q);
694
695 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
696 pa_sink_input_fail_move(i);
697 pa_sink_input_unref(i);
698 }
699
700 pa_queue_free(q, NULL, NULL);
701 }
702
703 /* Called from IO thread context */
704 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
705 pa_sink_input *i;
706 void *state = NULL;
707
708 pa_sink_assert_ref(s);
709 pa_sink_assert_io_context(s);
710 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
711
712     /* If nobody requested this and it is not actually a real rewind,
713      * then we can short-cut this. Please note that this means that
714      * not all rewind requests triggered upstream will always be
715      * translated into actual requests! */
716 if (!s->thread_info.rewind_requested && nbytes <= 0)
717 return;
718
719 s->thread_info.rewind_nbytes = 0;
720 s->thread_info.rewind_requested = FALSE;
721
722 if (s->thread_info.state == PA_SINK_SUSPENDED)
723 return;
724
725 if (nbytes > 0)
726 pa_log_debug("Processing rewind...");
727
728 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
729 pa_sink_input_assert_ref(i);
730 pa_sink_input_process_rewind(i, nbytes);
731 }
732
733 if (nbytes > 0)
734 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
735 pa_source_process_rewind(s->monitor_source, nbytes);
736 }
737
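/* Helper summary: fill_mix_info() peeks each attached sink input, collects up
 * to maxinfo non-silent chunks into info[] (taking a reference on each such
 * input), shrinks *length to the shortest chunk seen so that all entries
 * cover the same range, and returns the number of entries filled in. */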
738 /* Called from IO thread context */
739 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
740 pa_sink_input *i;
741 unsigned n = 0;
742 void *state = NULL;
743 size_t mixlength = *length;
744
745 pa_sink_assert_ref(s);
746 pa_sink_assert_io_context(s);
747 pa_assert(info);
748
749 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
750 pa_sink_input_assert_ref(i);
751
752 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
753
754 if (mixlength == 0 || info->chunk.length < mixlength)
755 mixlength = info->chunk.length;
756
757 if (pa_memblock_is_silence(info->chunk.memblock)) {
758 pa_memblock_unref(info->chunk.memblock);
759 continue;
760 }
761
762 info->userdata = pa_sink_input_ref(i);
763
764 pa_assert(info->chunk.memblock);
765 pa_assert(info->chunk.length > 0);
766
767 info++;
768 n++;
769 maxinfo--;
770 }
771
772 if (mixlength > 0)
773 *length = mixlength;
774
775 return n;
776 }
777
778 /* Called from IO thread context */
779 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
780 pa_sink_input *i;
781 void *state;
782 unsigned p = 0;
783 unsigned n_unreffed = 0;
784
785 pa_sink_assert_ref(s);
786 pa_sink_assert_io_context(s);
787 pa_assert(result);
788 pa_assert(result->memblock);
789 pa_assert(result->length > 0);
790
791 /* We optimize for the case where the order of the inputs has not changed */
792
793 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
794 unsigned j;
795 pa_mix_info* m = NULL;
796
797 pa_sink_input_assert_ref(i);
798
799         /* Let's try to find the matching entry in the pa_mix_info array */
800 for (j = 0; j < n; j ++) {
801
802 if (info[p].userdata == i) {
803 m = info + p;
804 break;
805 }
806
807 p++;
808 if (p >= n)
809 p = 0;
810 }
811
812 /* Drop read data */
813 pa_sink_input_drop(i, result->length);
814
815 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
816
817 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
818 void *ostate = NULL;
819 pa_source_output *o;
820 pa_memchunk c;
821
822 if (m && m->chunk.memblock) {
823 c = m->chunk;
824 pa_memblock_ref(c.memblock);
825 pa_assert(result->length <= c.length);
826 c.length = result->length;
827
828 pa_memchunk_make_writable(&c, 0);
829 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
830 } else {
831 c = s->silence;
832 pa_memblock_ref(c.memblock);
833 pa_assert(result->length <= c.length);
834 c.length = result->length;
835 }
836
837 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
838 pa_source_output_assert_ref(o);
839 pa_assert(o->direct_on_input == i);
840 pa_source_post_direct(s->monitor_source, o, &c);
841 }
842
843 pa_memblock_unref(c.memblock);
844 }
845 }
846
847 if (m) {
848 if (m->chunk.memblock)
849 pa_memblock_unref(m->chunk.memblock);
850 pa_memchunk_reset(&m->chunk);
851
852 pa_sink_input_unref(m->userdata);
853 m->userdata = NULL;
854
855 n_unreffed += 1;
856 }
857 }
858
859 /* Now drop references to entries that are included in the
860 * pa_mix_info array but don't exist anymore */
861
862 if (n_unreffed < n) {
863 for (; n > 0; info++, n--) {
864 if (info->userdata)
865 pa_sink_input_unref(info->userdata);
866 if (info->chunk.memblock)
867 pa_memblock_unref(info->chunk.memblock);
868 }
869 }
870
871 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
872 pa_source_post(s->monitor_source, result);
873 }
874
875 /* Called from IO thread context */
876 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
877 pa_mix_info info[MAX_MIX_CHANNELS];
878 unsigned n;
879 size_t block_size_max;
880
881 pa_sink_assert_ref(s);
882 pa_sink_assert_io_context(s);
883 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
884 pa_assert(pa_frame_aligned(length, &s->sample_spec));
885 pa_assert(result);
886
887 pa_assert(!s->thread_info.rewind_requested);
888 pa_assert(s->thread_info.rewind_nbytes == 0);
889
890 if (s->thread_info.state == PA_SINK_SUSPENDED) {
891 result->memblock = pa_memblock_ref(s->silence.memblock);
892 result->index = s->silence.index;
893 result->length = PA_MIN(s->silence.length, length);
894 return;
895 }
896
897 pa_sink_ref(s);
898
899 if (length <= 0)
900 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
901
902 block_size_max = pa_mempool_block_size_max(s->core->mempool);
903 if (length > block_size_max)
904 length = pa_frame_align(block_size_max, &s->sample_spec);
905
906 pa_assert(length > 0);
907
908 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
909
910 if (n == 0) {
911
912 *result = s->silence;
913 pa_memblock_ref(result->memblock);
914
915 if (result->length > length)
916 result->length = length;
917
918 } else if (n == 1) {
919 pa_cvolume volume;
920
921 *result = info[0].chunk;
922 pa_memblock_ref(result->memblock);
923
924 if (result->length > length)
925 result->length = length;
926
927 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
928
929 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
930 pa_memblock_unref(result->memblock);
931 pa_silence_memchunk_get(&s->core->silence_cache,
932 s->core->mempool,
933 result,
934 &s->sample_spec,
935 result->length);
936 } else if (!pa_cvolume_is_norm(&volume)) {
937 pa_memchunk_make_writable(result, 0);
938 pa_volume_memchunk(result, &s->sample_spec, &volume);
939 }
940 } else {
941 void *ptr;
942 result->memblock = pa_memblock_new(s->core->mempool, length);
943
944 ptr = pa_memblock_acquire(result->memblock);
945 result->length = pa_mix(info, n,
946 ptr, length,
947 &s->sample_spec,
948 &s->thread_info.soft_volume,
949 s->thread_info.soft_muted);
950 pa_memblock_release(result->memblock);
951
952 result->index = 0;
953 }
954
955 inputs_drop(s, info, n, result);
956
957 pa_sink_unref(s);
958 }
959
960 /* Called from IO thread context */
961 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
962 pa_mix_info info[MAX_MIX_CHANNELS];
963 unsigned n;
964 size_t length, block_size_max;
965
966 pa_sink_assert_ref(s);
967 pa_sink_assert_io_context(s);
968 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
969 pa_assert(target);
970 pa_assert(target->memblock);
971 pa_assert(target->length > 0);
972 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
973
974 pa_assert(!s->thread_info.rewind_requested);
975 pa_assert(s->thread_info.rewind_nbytes == 0);
976
977 if (s->thread_info.state == PA_SINK_SUSPENDED) {
978 pa_silence_memchunk(target, &s->sample_spec);
979 return;
980 }
981
982 pa_sink_ref(s);
983
984 length = target->length;
985 block_size_max = pa_mempool_block_size_max(s->core->mempool);
986 if (length > block_size_max)
987 length = pa_frame_align(block_size_max, &s->sample_spec);
988
989 pa_assert(length > 0);
990
991 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
992
993 if (n == 0) {
994 if (target->length > length)
995 target->length = length;
996
997 pa_silence_memchunk(target, &s->sample_spec);
998 } else if (n == 1) {
999 pa_cvolume volume;
1000
1001 if (target->length > length)
1002 target->length = length;
1003
1004 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1005
1006 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1007 pa_silence_memchunk(target, &s->sample_spec);
1008 else {
1009 pa_memchunk vchunk;
1010
1011 vchunk = info[0].chunk;
1012 pa_memblock_ref(vchunk.memblock);
1013
1014 if (vchunk.length > length)
1015 vchunk.length = length;
1016
1017 if (!pa_cvolume_is_norm(&volume)) {
1018 pa_memchunk_make_writable(&vchunk, 0);
1019 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1020 }
1021
1022 pa_memchunk_memcpy(target, &vchunk);
1023 pa_memblock_unref(vchunk.memblock);
1024 }
1025
1026 } else {
1027 void *ptr;
1028
1029 ptr = pa_memblock_acquire(target->memblock);
1030
1031 target->length = pa_mix(info, n,
1032 (uint8_t*) ptr + target->index, length,
1033 &s->sample_spec,
1034 &s->thread_info.soft_volume,
1035 s->thread_info.soft_muted);
1036
1037 pa_memblock_release(target->memblock);
1038 }
1039
1040 inputs_drop(s, info, n, target);
1041
1042 pa_sink_unref(s);
1043 }
1044
1045 /* Called from IO thread context */
1046 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1047 pa_memchunk chunk;
1048 size_t l, d;
1049
1050 pa_sink_assert_ref(s);
1051 pa_sink_assert_io_context(s);
1052 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1053 pa_assert(target);
1054 pa_assert(target->memblock);
1055 pa_assert(target->length > 0);
1056 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1057
1058 pa_assert(!s->thread_info.rewind_requested);
1059 pa_assert(s->thread_info.rewind_nbytes == 0);
1060
1061 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1062 pa_silence_memchunk(target, &s->sample_spec);
1063 return;
1064 }
1065
1066 pa_sink_ref(s);
1067
1068 l = target->length;
1069 d = 0;
1070 while (l > 0) {
1071 chunk = *target;
1072 chunk.index += d;
1073 chunk.length -= d;
1074
1075 pa_sink_render_into(s, &chunk);
1076
1077 d += chunk.length;
1078 l -= chunk.length;
1079 }
1080
1081 pa_sink_unref(s);
1082 }
1083
1084 /* Called from IO thread context */
1085 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1086 pa_sink_assert_ref(s);
1087 pa_sink_assert_io_context(s);
1088 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1089 pa_assert(length > 0);
1090 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1091 pa_assert(result);
1092
1093 pa_assert(!s->thread_info.rewind_requested);
1094 pa_assert(s->thread_info.rewind_nbytes == 0);
1095
1096 pa_sink_ref(s);
1097
1098 pa_sink_render(s, length, result);
1099
1100 if (result->length < length) {
1101 pa_memchunk chunk;
1102
1103 pa_memchunk_make_writable(result, length);
1104
1105 chunk.memblock = result->memblock;
1106 chunk.index = result->index + result->length;
1107 chunk.length = length - result->length;
1108
1109 pa_sink_render_into_full(s, &chunk);
1110
1111 result->length = length;
1112 }
1113
1114 pa_sink_unref(s);
1115 }
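/* Summary of the render family above: pa_sink_render() hands out a chunk of
 * at most the requested length (possibly shorter), pa_sink_render_into()
 * fills as much of the caller-supplied chunk as it can in one pass (and may
 * shorten its length), pa_sink_render_into_full() loops until the supplied
 * chunk is completely filled, and pa_sink_render_full() always returns
 * exactly the requested number of bytes. */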
1116
1117 /* Called from main thread */
1118 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1119 pa_usec_t usec = 0;
1120
1121 pa_sink_assert_ref(s);
1122 pa_assert_ctl_context();
1123 pa_assert(PA_SINK_IS_LINKED(s->state));
1124
1125 /* The returned value is supposed to be in the time domain of the sound card! */
1126
1127 if (s->state == PA_SINK_SUSPENDED)
1128 return 0;
1129
1130 if (!(s->flags & PA_SINK_LATENCY))
1131 return 0;
1132
1133 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1134
1135 return usec;
1136 }
1137
1138 /* Called from IO thread */
1139 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1140 pa_usec_t usec = 0;
1141 pa_msgobject *o;
1142
1143 pa_sink_assert_ref(s);
1144 pa_sink_assert_io_context(s);
1145 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1146
1147 /* The returned value is supposed to be in the time domain of the sound card! */
1148
1149 if (s->thread_info.state == PA_SINK_SUSPENDED)
1150 return 0;
1151
1152 if (!(s->flags & PA_SINK_LATENCY))
1153 return 0;
1154
1155 o = PA_MSGOBJECT(s);
1156
1157 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1158
1159 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1160 return -1;
1161
1162 return usec;
1163 }
1164
1165 /* Called from main context */
1166 static void compute_reference_ratios(pa_sink *s) {
1167 uint32_t idx;
1168 pa_sink_input *i;
1169
1170 pa_sink_assert_ref(s);
1171 pa_assert_ctl_context();
1172 pa_assert(PA_SINK_IS_LINKED(s->state));
1173 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1174
1175 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1176 unsigned c;
1177 pa_cvolume remapped;
1178
1179 /*
1180          * Calculates the reference ratio from the sink's reference
1181          * volume. This basically calculates:
1182 *
1183 * i->reference_ratio = i->volume / s->reference_volume
1184 */
1185
1186 remapped = s->reference_volume;
1187 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1188
1189 i->reference_ratio.channels = i->sample_spec.channels;
1190
1191 for (c = 0; c < i->sample_spec.channels; c++) {
1192
1193 /* We don't update when the sink volume is 0 anyway */
1194 if (remapped.values[c] <= PA_VOLUME_MUTED)
1195 continue;
1196
1197 /* Don't update the reference ratio unless necessary */
1198 if (pa_sw_volume_multiply(
1199 i->reference_ratio.values[c],
1200 remapped.values[c]) == i->volume.values[c])
1201 continue;
1202
1203 i->reference_ratio.values[c] = pa_sw_volume_divide(
1204 i->volume.values[c],
1205 remapped.values[c]);
1206 }
1207 }
1208 }
1209
1210 /* Called from main context */
1211 static void compute_real_ratios(pa_sink *s) {
1212 pa_sink_input *i;
1213 uint32_t idx;
1214
1215 pa_sink_assert_ref(s);
1216 pa_assert_ctl_context();
1217 pa_assert(PA_SINK_IS_LINKED(s->state));
1218 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1219
1220 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1221 unsigned c;
1222 pa_cvolume remapped;
1223
1224 /*
1225 * This basically calculates:
1226 *
1227 * i->real_ratio := i->volume / s->real_volume
1228 * i->soft_volume := i->real_ratio * i->volume_factor
1229 */
1230
1231 remapped = s->real_volume;
1232 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1233
1234 i->real_ratio.channels = i->sample_spec.channels;
1235 i->soft_volume.channels = i->sample_spec.channels;
1236
1237 for (c = 0; c < i->sample_spec.channels; c++) {
1238
1239 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1240 /* We leave i->real_ratio untouched */
1241 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1242 continue;
1243 }
1244
1245 /* Don't lose accuracy unless necessary */
1246 if (pa_sw_volume_multiply(
1247 i->real_ratio.values[c],
1248 remapped.values[c]) != i->volume.values[c])
1249
1250 i->real_ratio.values[c] = pa_sw_volume_divide(
1251 i->volume.values[c],
1252 remapped.values[c]);
1253
1254 i->soft_volume.values[c] = pa_sw_volume_multiply(
1255 i->real_ratio.values[c],
1256 i->volume_factor.values[c]);
1257 }
1258
1259 /* We don't copy the soft_volume to the thread_info data
1260 * here. That must be done by the caller */
1261 }
1262 }
1263
1264 /* Called from main thread */
1265 static void compute_real_volume(pa_sink *s) {
1266 pa_sink_input *i;
1267 uint32_t idx;
1268
1269 pa_sink_assert_ref(s);
1270 pa_assert_ctl_context();
1271 pa_assert(PA_SINK_IS_LINKED(s->state));
1272 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1273
1274 /* This determines the maximum volume of all streams and sets
1275 * s->real_volume accordingly. */
1276
1277 if (pa_idxset_isempty(s->inputs)) {
1278 /* In the special case that we have no sink input we leave the
1279 * volume unmodified. */
1280 s->real_volume = s->reference_volume;
1281 return;
1282 }
1283
1284 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1285
1286 /* First let's determine the new maximum volume of all inputs
1287 * connected to this sink */
1288 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1289 pa_cvolume remapped;
1290
1291 remapped = i->volume;
1292 pa_cvolume_remap(&remapped, &i->channel_map, &s->channel_map);
1293 pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
1294 }
1295
1296 /* Then, let's update the real ratios/soft volumes of all inputs
1297 * connected to this sink */
1298 compute_real_ratios(s);
1299 }
1300
1301 /* Called from main thread */
1302 static void propagate_reference_volume(pa_sink *s) {
1303 pa_sink_input *i;
1304 uint32_t idx;
1305
1306 pa_sink_assert_ref(s);
1307 pa_assert_ctl_context();
1308 pa_assert(PA_SINK_IS_LINKED(s->state));
1309 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1310
1311     /* This is called whenever the sink volume changes for a reason
1312      * other than a sink input volume change. We need to fix up the
1313      * sink input volumes accordingly */
1314
1315 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1316 pa_cvolume old_volume, remapped;
1317
1318 old_volume = i->volume;
1319
1320 /* This basically calculates:
1321 *
1322 * i->volume := s->reference_volume * i->reference_ratio */
1323
1324 remapped = s->reference_volume;
1325 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1326 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1327
1328 /* The volume changed, let's tell people so */
1329 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1330
1331 if (i->volume_changed)
1332 i->volume_changed(i);
1333
1334 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1335 }
1336 }
1337 }
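/* Orientation for the flat volume code below (a summary, not normative):
 * reference_volume is the volume presented to and configured by the user,
 * real_volume is what actually gets applied to the device (in flat volume
 * mode the maximum of all connected stream volumes), and soft_volume is the
 * remainder that has to be applied in software while mixing. */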
1338
1339 /* Called from main thread */
1340 void pa_sink_set_volume(
1341 pa_sink *s,
1342 const pa_cvolume *volume,
1343 pa_bool_t send_msg,
1344 pa_bool_t save) {
1345
1346 pa_cvolume old_reference_volume;
1347 pa_bool_t reference_changed;
1348
1349 pa_sink_assert_ref(s);
1350 pa_assert_ctl_context();
1351 pa_assert(PA_SINK_IS_LINKED(s->state));
1352 pa_assert(!volume || pa_cvolume_valid(volume));
1353 pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));
1354 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1355
1356 /* As a special exception we accept mono volumes on all sinks --
1357 * even on those with more complex channel maps */
1358
1359 /* If volume is NULL we synchronize the sink's real and reference
1360 * volumes with the stream volumes. If it is not NULL we update
1361 * the reference_volume with it. */
1362
1363 old_reference_volume = s->reference_volume;
1364
1365 if (volume) {
1366
1367 if (pa_cvolume_compatible(volume, &s->sample_spec))
1368 s->reference_volume = *volume;
1369 else
1370 pa_cvolume_scale(&s->reference_volume, pa_cvolume_max(volume));
1371
1372 if (s->flags & PA_SINK_FLAT_VOLUME) {
1373 /* OK, propagate this volume change back to the inputs */
1374 propagate_reference_volume(s);
1375
1376 /* And now recalculate the real volume */
1377 compute_real_volume(s);
1378 } else
1379 s->real_volume = s->reference_volume;
1380
1381 } else {
1382 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1383
1384 /* Ok, let's determine the new real volume */
1385 compute_real_volume(s);
1386
1387 /* Let's 'push' the reference volume if necessary */
1388 pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);
1389
1390 /* We need to fix the reference ratios of all streams now that
1391 * we changed the reference volume */
1392 compute_reference_ratios(s);
1393 }
1394
1395 reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
1396 s->save_volume = (!reference_changed && s->save_volume) || save;
1397
1398 if (s->set_volume) {
1399 /* If we have a function set_volume(), then we do not apply a
1400 * soft volume by default. However, set_volume() is free to
1401 * apply one to s->soft_volume */
1402
1403 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1404 s->set_volume(s);
1405
1406 } else
1407 /* If we have no function set_volume(), then the soft volume
1408 * becomes the virtual volume */
1409 s->soft_volume = s->real_volume;
1410
1411 /* This tells the sink that soft and/or virtual volume changed */
1412 if (send_msg)
1413 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1414
1415 if (reference_changed)
1416 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1417 }
1418
1419 /* Called from main thread. Only to be called by sink implementor */
1420 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1421 pa_sink_assert_ref(s);
1422 pa_assert_ctl_context();
1423
1424 if (!volume)
1425 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1426 else
1427 s->soft_volume = *volume;
1428
1429 if (PA_SINK_IS_LINKED(s->state))
1430 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1431 else
1432 s->thread_info.soft_volume = s->soft_volume;
1433 }
1434
1435 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
1436 pa_sink_input *i;
1437 uint32_t idx;
1438 pa_cvolume old_reference_volume;
1439
1440 pa_sink_assert_ref(s);
1441 pa_assert_ctl_context();
1442 pa_assert(PA_SINK_IS_LINKED(s->state));
1443
1444 /* This is called when the hardware's real volume changes due to
1445 * some external event. We copy the real volume into our
1446 * reference volume and then rebuild the stream volumes based on
1447 * i->real_ratio which should stay fixed. */
1448
1449 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1450 return;
1451
1452 old_reference_volume = s->reference_volume;
1453
1454 /* 1. Make the real volume the reference volume */
1455 s->reference_volume = s->real_volume;
1456
1457 if (s->flags & PA_SINK_FLAT_VOLUME) {
1458
1459 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1460 pa_cvolume old_volume, remapped;
1461
1462 old_volume = i->volume;
1463
1464 /* 2. Since the sink's reference and real volumes are equal
1465 * now our ratios should be too. */
1466 i->reference_ratio = i->real_ratio;
1467
1468 /* 3. Recalculate the new stream reference volume based on the
1469 * reference ratio and the sink's reference volume.
1470 *
1471 * This basically calculates:
1472 *
1473 * i->volume = s->reference_volume * i->reference_ratio
1474 *
1475 * This is identical to propagate_reference_volume() */
1476 remapped = s->reference_volume;
1477 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1478 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1479
1480 /* Notify if something changed */
1481 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1482
1483 if (i->volume_changed)
1484 i->volume_changed(i);
1485
1486 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1487 }
1488 }
1489 }
1490
1491 /* Something got changed in the hardware. It probably makes sense
1492 * to save changed hw settings given that hw volume changes not
1493 * triggered by PA are almost certainly done by the user. */
1494 s->save_volume = TRUE;
1495
1496 if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
1497 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1498 }
1499
1500 /* Called from main thread */
1501 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1502 pa_sink_assert_ref(s);
1503 pa_assert_ctl_context();
1504 pa_assert(PA_SINK_IS_LINKED(s->state));
1505
1506 if (s->refresh_volume || force_refresh) {
1507 struct pa_cvolume old_real_volume;
1508
1509 old_real_volume = s->real_volume;
1510
1511 if (s->get_volume)
1512 s->get_volume(s);
1513
1514 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1515
1516 propagate_real_volume(s, &old_real_volume);
1517 }
1518
1519 return &s->reference_volume;
1520 }
1521
1522 /* Called from main thread */
1523 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1524 pa_cvolume old_real_volume;
1525
1526 pa_sink_assert_ref(s);
1527 pa_assert_ctl_context();
1528 pa_assert(PA_SINK_IS_LINKED(s->state));
1529
1530 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1531
1532 old_real_volume = s->real_volume;
1533 s->real_volume = *new_real_volume;
1534
1535 propagate_real_volume(s, &old_real_volume);
1536 }
1537
1538 /* Called from main thread */
1539 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1540 pa_bool_t old_muted;
1541
1542 pa_sink_assert_ref(s);
1543 pa_assert_ctl_context();
1544 pa_assert(PA_SINK_IS_LINKED(s->state));
1545
1546 old_muted = s->muted;
1547 s->muted = mute;
1548 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1549
1550 if (s->set_mute)
1551 s->set_mute(s);
1552
1553 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1554
1555 if (old_muted != s->muted)
1556 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1557 }
1558
1559 /* Called from main thread */
1560 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1561
1562 pa_sink_assert_ref(s);
1563 pa_assert_ctl_context();
1564 pa_assert(PA_SINK_IS_LINKED(s->state));
1565
1566 if (s->refresh_muted || force_refresh) {
1567 pa_bool_t old_muted = s->muted;
1568
1569 if (s->get_mute)
1570 s->get_mute(s);
1571
1572 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1573
1574 if (old_muted != s->muted) {
1575 s->save_muted = TRUE;
1576
1577 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1578
1579 /* Make sure the soft mute status stays in sync */
1580 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1581 }
1582 }
1583
1584 return s->muted;
1585 }
1586
1587 /* Called from main thread */
1588 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1589 pa_sink_assert_ref(s);
1590 pa_assert_ctl_context();
1591 pa_assert(PA_SINK_IS_LINKED(s->state));
1592
1593     /* The sink implementor may call this if the mute status changed to make sure everyone is notified */
1594
1595 if (s->muted == new_muted)
1596 return;
1597
1598 s->muted = new_muted;
1599 s->save_muted = TRUE;
1600
1601 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1602 }
1603
1604 /* Called from main thread */
1605 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1606 pa_sink_assert_ref(s);
1607 pa_assert_ctl_context();
1608
1609 if (p)
1610 pa_proplist_update(s->proplist, mode, p);
1611
1612 if (PA_SINK_IS_LINKED(s->state)) {
1613 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1614 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1615 }
1616
1617 return TRUE;
1618 }
1619
1620 /* Called from main thread */
1621 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1622 void pa_sink_set_description(pa_sink *s, const char *description) {
1623 const char *old;
1624 pa_sink_assert_ref(s);
1625 pa_assert_ctl_context();
1626
1627 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1628 return;
1629
1630 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1631
1632 if (old && description && pa_streq(old, description))
1633 return;
1634
1635 if (description)
1636 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1637 else
1638 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1639
1640 if (s->monitor_source) {
1641 char *n;
1642
1643 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1644 pa_source_set_description(s->monitor_source, n);
1645 pa_xfree(n);
1646 }
1647
1648 if (PA_SINK_IS_LINKED(s->state)) {
1649 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1650 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1651 }
1652 }
1653
1654 /* Called from main thread */
1655 unsigned pa_sink_linked_by(pa_sink *s) {
1656 unsigned ret;
1657
1658 pa_sink_assert_ref(s);
1659 pa_assert_ctl_context();
1660 pa_assert(PA_SINK_IS_LINKED(s->state));
1661
1662 ret = pa_idxset_size(s->inputs);
1663
1664 /* We add in the number of streams connected to us here. Please
1665      * note the asymmetry to pa_sink_used_by()! */
1666
1667 if (s->monitor_source)
1668 ret += pa_source_linked_by(s->monitor_source);
1669
1670 return ret;
1671 }
1672
1673 /* Called from main thread */
1674 unsigned pa_sink_used_by(pa_sink *s) {
1675 unsigned ret;
1676
1677 pa_sink_assert_ref(s);
1678 pa_assert_ctl_context();
1679 pa_assert(PA_SINK_IS_LINKED(s->state));
1680
1681 ret = pa_idxset_size(s->inputs);
1682 pa_assert(ret >= s->n_corked);
1683
1684 /* Streams connected to our monitor source do not matter for
1685      * pa_sink_used_by()! */
1686
1687 return ret - s->n_corked;
1688 }
1689
1690 /* Called from main thread */
1691 unsigned pa_sink_check_suspend(pa_sink *s) {
1692 unsigned ret;
1693 pa_sink_input *i;
1694 uint32_t idx;
1695
1696 pa_sink_assert_ref(s);
1697 pa_assert_ctl_context();
1698
1699 if (!PA_SINK_IS_LINKED(s->state))
1700 return 0;
1701
1702 ret = 0;
1703
1704 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1705 pa_sink_input_state_t st;
1706
1707 st = pa_sink_input_get_state(i);
1708 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1709
1710 if (st == PA_SINK_INPUT_CORKED)
1711 continue;
1712
1713 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1714 continue;
1715
1716 ret ++;
1717 }
1718
1719 if (s->monitor_source)
1720 ret += pa_source_check_suspend(s->monitor_source);
1721
1722 return ret;
1723 }
1724
1725 /* Called from the IO thread */
1726 static void sync_input_volumes_within_thread(pa_sink *s) {
1727 pa_sink_input *i;
1728 void *state = NULL;
1729
1730 pa_sink_assert_ref(s);
1731 pa_sink_assert_io_context(s);
1732
1733 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1734 if (pa_atomic_load(&i->before_ramping_v))
1735 i->thread_info.future_soft_volume = i->soft_volume;
1736
1737 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1738 continue;
1739
1740 if (!pa_atomic_load(&i->before_ramping_v))
1741 i->thread_info.soft_volume = i->soft_volume;
1742 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1743 }
1744 }
1745
1746 /* Called from IO thread, except when it is not */
1747 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1748 pa_sink *s = PA_SINK(o);
1749 pa_sink_assert_ref(s);
1750
1751 switch ((pa_sink_message_t) code) {
1752
1753 case PA_SINK_MESSAGE_ADD_INPUT: {
1754 pa_sink_input *i = PA_SINK_INPUT(userdata);
1755
1756 /* If you change anything here, make sure to change the
1757 * sink input handling a few lines down at
1758 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1759
1760 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1761
1762 /* Since the caller sleeps in pa_sink_input_put(), we can
1763 * safely access data outside of thread_info even though
1764 * it is mutable */
1765
1766 if ((i->thread_info.sync_prev = i->sync_prev)) {
1767 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1768 pa_assert(i->sync_prev->sync_next == i);
1769 i->thread_info.sync_prev->thread_info.sync_next = i;
1770 }
1771
1772 if ((i->thread_info.sync_next = i->sync_next)) {
1773 pa_assert(i->sink == i->thread_info.sync_next->sink);
1774 pa_assert(i->sync_next->sync_prev == i);
1775 i->thread_info.sync_next->thread_info.sync_prev = i;
1776 }
1777
1778 pa_assert(!i->thread_info.attached);
1779 i->thread_info.attached = TRUE;
1780
1781 if (i->attach)
1782 i->attach(i);
1783
1784 pa_sink_input_set_state_within_thread(i, i->state);
1785
1786 /* The requested latency of the sink input needs to be
1787 * fixed up and then configured on the sink */
1788
1789 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1790 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1791
1792 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1793 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1794
1795 /* We don't rewind here automatically. This is left to the
1796 * sink input implementor because some sink inputs need a
1797 * slow start, i.e. need some time to buffer client
1798 * samples before beginning streaming. */
1799
1800 /* In flat volume mode we need to update the volume as
1801 * well */
1802 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1803 }
1804
1805 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1806 pa_sink_input *i = PA_SINK_INPUT(userdata);
1807
1808 /* If you change anything here, make sure to change the
1809 * sink input handling a few lines down at
1810              * PA_SINK_MESSAGE_START_MOVE, too. */
1811
1812 if (i->detach)
1813 i->detach(i);
1814
1815 pa_sink_input_set_state_within_thread(i, i->state);
1816
1817 pa_assert(i->thread_info.attached);
1818 i->thread_info.attached = FALSE;
1819
1820 /* Since the caller sleeps in pa_sink_input_unlink(),
1821 * we can safely access data outside of thread_info even
1822 * though it is mutable */
1823
1824 pa_assert(!i->sync_prev);
1825 pa_assert(!i->sync_next);
1826
1827 if (i->thread_info.sync_prev) {
1828 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1829 i->thread_info.sync_prev = NULL;
1830 }
1831
1832 if (i->thread_info.sync_next) {
1833 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1834 i->thread_info.sync_next = NULL;
1835 }
1836
1837 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1838 pa_sink_input_unref(i);
1839
1840 pa_sink_invalidate_requested_latency(s, TRUE);
1841 pa_sink_request_rewind(s, (size_t) -1);
1842
1843 /* In flat volume mode we need to update the volume as
1844 * well */
1845 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1846 }
1847
1848 case PA_SINK_MESSAGE_START_MOVE: {
1849 pa_sink_input *i = PA_SINK_INPUT(userdata);
1850
1851 /* We don't support moving synchronized streams. */
1852 pa_assert(!i->sync_prev);
1853 pa_assert(!i->sync_next);
1854 pa_assert(!i->thread_info.sync_next);
1855 pa_assert(!i->thread_info.sync_prev);
1856
1857 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1858 pa_usec_t usec = 0;
1859 size_t sink_nbytes, total_nbytes;
1860
1861 /* Get the latency of the sink */
1862 usec = pa_sink_get_latency_within_thread(s);
1863 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1864 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1865
1866 if (total_nbytes > 0) {
1867 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1868 i->thread_info.rewrite_flush = TRUE;
1869 pa_sink_input_process_rewind(i, sink_nbytes);
1870 }
1871 }
1872
1873 if (i->detach)
1874 i->detach(i);
1875
1876 pa_assert(i->thread_info.attached);
1877 i->thread_info.attached = FALSE;
1878
1879 /* Let's remove the sink input ...*/
1880 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1881 pa_sink_input_unref(i);
1882
1883 pa_sink_invalidate_requested_latency(s, TRUE);
1884
1885 pa_log_debug("Requesting rewind due to started move");
1886 pa_sink_request_rewind(s, (size_t) -1);
1887
1888 /* In flat volume mode we need to update the volume as
1889 * well */
1890 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1891 }
1892
1893 case PA_SINK_MESSAGE_FINISH_MOVE: {
1894 pa_sink_input *i = PA_SINK_INPUT(userdata);
1895
1896 /* We don't support moving synchronized streams. */
1897 pa_assert(!i->sync_prev);
1898 pa_assert(!i->sync_next);
1899 pa_assert(!i->thread_info.sync_next);
1900 pa_assert(!i->thread_info.sync_prev);
1901
1902 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1903
1904 pa_assert(!i->thread_info.attached);
1905 i->thread_info.attached = TRUE;
1906
1907 if (i->attach)
1908 i->attach(i);
1909
1910 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1911 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1912
1913 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1914 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1915
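                     /* Counterpart to the START_MOVE rewind above: drop a sink
                      * latency's worth of data from the input and request a
                      * rewind, presumably so the stream lines up with audio the
                      * new sink has already rendered rather than starting late. */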
1916 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1917 pa_usec_t usec = 0;
1918 size_t nbytes;
1919
1920 /* Get the latency of the sink */
1921 usec = pa_sink_get_latency_within_thread(s);
1922 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1923
1924 if (nbytes > 0)
1925 pa_sink_input_drop(i, nbytes);
1926
1927 pa_log_debug("Requesting rewind due to finished move");
1928 pa_sink_request_rewind(s, nbytes);
1929 }
1930
1931 /* In flat volume mode we need to update the volume as
1932 * well */
1933 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1934 }
1935
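             /* SET_VOLUME is also re-dispatched from the ADD_INPUT, REMOVE_INPUT
              * and *_MOVE handlers above: it copies the main-thread soft_volume
              * into thread_info and, for flat volume sinks, falls through to
              * SYNC_VOLUMES so that the per-input volumes are refreshed too. */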
1936 case PA_SINK_MESSAGE_SET_VOLUME:
1937
1938 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1939 s->thread_info.soft_volume = s->soft_volume;
1940 pa_sink_request_rewind(s, (size_t) -1);
1941 }
1942
1943 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1944 return 0;
1945
1946 /* Fall through ... */
1947
1948 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1949 sync_input_volumes_within_thread(s);
1950 return 0;
1951
1952 case PA_SINK_MESSAGE_GET_VOLUME:
1953 return 0;
1954
1955 case PA_SINK_MESSAGE_SET_MUTE:
1956
1957 if (s->thread_info.soft_muted != s->muted) {
1958 s->thread_info.soft_muted = s->muted;
1959 pa_sink_request_rewind(s, (size_t) -1);
1960 }
1961
1962 return 0;
1963
1964 case PA_SINK_MESSAGE_GET_MUTE:
1965 return 0;
1966
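             /* SET_STATE updates the IO-thread view of the sink state. Any
              * pending rewind is cancelled when entering SUSPENDED, and a
              * transition into or out of SUSPENDED is pushed down to every
              * input that implements suspend_within_thread(). */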
1967 case PA_SINK_MESSAGE_SET_STATE: {
1968
1969 pa_bool_t suspend_change =
1970 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1971 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1972
1973 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1974
1975 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1976 s->thread_info.rewind_nbytes = 0;
1977 s->thread_info.rewind_requested = FALSE;
1978 }
1979
1980 if (suspend_change) {
1981 pa_sink_input *i;
1982 void *state = NULL;
1983
1984 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1985 if (i->suspend_within_thread)
1986 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1987 }
1988
1989 return 0;
1990 }
1991
1992 case PA_SINK_MESSAGE_DETACH:
1993
1994 /* Detach all streams */
1995 pa_sink_detach_within_thread(s);
1996 return 0;
1997
1998 case PA_SINK_MESSAGE_ATTACH:
1999
2000 /* Reattach all streams */
2001 pa_sink_attach_within_thread(s);
2002 return 0;
2003
2004 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2005
2006 pa_usec_t *usec = userdata;
2007 *usec = pa_sink_get_requested_latency_within_thread(s);
2008
2009             /* Yes, that's right: the IO thread will see -1 when no
2010              * explicit requested latency is configured, while the
2011              * main thread will see max_latency. */
2012 if (*usec == (pa_usec_t) -1)
2013 *usec = s->thread_info.max_latency;
2014
2015 return 0;
2016 }
2017
2018 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2019 pa_usec_t *r = userdata;
2020
2021 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2022
2023 return 0;
2024 }
2025
2026 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2027 pa_usec_t *r = userdata;
2028
2029 r[0] = s->thread_info.min_latency;
2030 r[1] = s->thread_info.max_latency;
2031
2032 return 0;
2033 }
2034
2035 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2036
2037 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2038 return 0;
2039
2040 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2041
2042 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2043 return 0;
2044
2045 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2046
2047 *((size_t*) userdata) = s->thread_info.max_rewind;
2048 return 0;
2049
2050 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2051
2052 *((size_t*) userdata) = s->thread_info.max_request;
2053 return 0;
2054
2055 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2056
2057 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2058 return 0;
2059
2060 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2061
2062 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2063 return 0;
2064
2065 case PA_SINK_MESSAGE_GET_LATENCY:
2066 case PA_SINK_MESSAGE_MAX:
2067 ;
2068 }
2069
2070 return -1;
2071 }
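     /* Note: the wrappers below show the usual calling pattern for the handler
      * above -- main-thread callers synchronously post one of these messages,
      * e.g.
      *
      *     pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s),
      *                                    PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
      *
      * while IO-thread callers use the *_within_thread() helpers directly. */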
2072
2073 /* Called from main thread */
2074 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2075 pa_sink *sink;
2076 uint32_t idx;
2077 int ret = 0;
2078
2079 pa_core_assert_ref(c);
2080 pa_assert_ctl_context();
2081 pa_assert(cause != 0);
2082
2083 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2084 int r;
2085
2086 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2087 ret = r;
2088 }
2089
2090 return ret;
2091 }
2092
2093 /* Called from main thread */
2094 void pa_sink_detach(pa_sink *s) {
2095 pa_sink_assert_ref(s);
2096 pa_assert_ctl_context();
2097 pa_assert(PA_SINK_IS_LINKED(s->state));
2098
2099 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2100 }
2101
2102 /* Called from main thread */
2103 void pa_sink_attach(pa_sink *s) {
2104 pa_sink_assert_ref(s);
2105 pa_assert_ctl_context();
2106 pa_assert(PA_SINK_IS_LINKED(s->state));
2107
2108 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2109 }
2110
2111 /* Called from IO thread */
2112 void pa_sink_detach_within_thread(pa_sink *s) {
2113 pa_sink_input *i;
2114 void *state = NULL;
2115
2116 pa_sink_assert_ref(s);
2117 pa_sink_assert_io_context(s);
2118 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2119
2120 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2121 if (i->detach)
2122 i->detach(i);
2123
2124 if (s->monitor_source)
2125 pa_source_detach_within_thread(s->monitor_source);
2126 }
2127
2128 /* Called from IO thread */
2129 void pa_sink_attach_within_thread(pa_sink *s) {
2130 pa_sink_input *i;
2131 void *state = NULL;
2132
2133 pa_sink_assert_ref(s);
2134 pa_sink_assert_io_context(s);
2135 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2136
2137 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2138 if (i->attach)
2139 i->attach(i);
2140
2141 if (s->monitor_source)
2142 pa_source_attach_within_thread(s->monitor_source);
2143 }
2144
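     /* A rewind request of (size_t) -1 means "as much as possible". The amount
      * is clamped to max_rewind, ignored while the sink is suspended, and an
      * already pending request is only ever grown, never shrunk, before the
      * implementor's request_rewind() hook is invoked. */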
2145 /* Called from IO thread */
2146 void pa_sink_request_rewind(pa_sink *s, size_t nbytes) {
2147 pa_sink_assert_ref(s);
2148 pa_sink_assert_io_context(s);
2149 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2150
2151 if (s->thread_info.state == PA_SINK_SUSPENDED)
2152 return;
2153
2154 if (nbytes == (size_t) -1)
2155 nbytes = s->thread_info.max_rewind;
2156
2157 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2158
2159 if (s->thread_info.rewind_requested &&
2160 nbytes <= s->thread_info.rewind_nbytes)
2161 return;
2162
2163 s->thread_info.rewind_nbytes = nbytes;
2164 s->thread_info.rewind_requested = TRUE;
2165
2166 if (s->request_rewind)
2167 s->request_rewind(s);
2168 }
2169
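     /* The requested latency is the smallest latency asked for by any input or
      * by the monitor source, clamped to [min_latency, max_latency]. Sinks
      * without PA_SINK_DYNAMIC_LATENCY simply report their clamped fixed
      * latency. The result is cached (while the sink is linked) until
      * pa_sink_invalidate_requested_latency() is called. */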
2170 /* Called from IO thread */
2171 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2172 pa_usec_t result = (pa_usec_t) -1;
2173 pa_sink_input *i;
2174 void *state = NULL;
2175 pa_usec_t monitor_latency;
2176
2177 pa_sink_assert_ref(s);
2178 pa_sink_assert_io_context(s);
2179
2180 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2181 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2182
2183 if (s->thread_info.requested_latency_valid)
2184 return s->thread_info.requested_latency;
2185
2186 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2187 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2188 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2189 result = i->thread_info.requested_sink_latency;
2190
2191 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2192
2193 if (monitor_latency != (pa_usec_t) -1 &&
2194 (result == (pa_usec_t) -1 || result > monitor_latency))
2195 result = monitor_latency;
2196
2197 if (result != (pa_usec_t) -1)
2198 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2199
2200 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2201 /* Only cache if properly initialized */
2202 s->thread_info.requested_latency = result;
2203 s->thread_info.requested_latency_valid = TRUE;
2204 }
2205
2206 return result;
2207 }
2208
2209 /* Called from main thread */
2210 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2211 pa_usec_t usec = 0;
2212
2213 pa_sink_assert_ref(s);
2214 pa_assert_ctl_context();
2215 pa_assert(PA_SINK_IS_LINKED(s->state));
2216
2217 if (s->state == PA_SINK_SUSPENDED)
2218 return 0;
2219
2220 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2221 return usec;
2222 }
2223
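     /* max_rewind is the upper bound, in bytes, on how much already-rendered
      * audio the sink can take back. Changes are propagated to every input and
      * to the monitor source, which presumably size their history buffers
      * accordingly. */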
2224 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2225 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2226 pa_sink_input *i;
2227 void *state = NULL;
2228
2229 pa_sink_assert_ref(s);
2230 pa_sink_assert_io_context(s);
2231
2232 if (max_rewind == s->thread_info.max_rewind)
2233 return;
2234
2235 s->thread_info.max_rewind = max_rewind;
2236
2237 if (PA_SINK_IS_LINKED(s->thread_info.state))
2238 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2239 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2240
2241 if (s->monitor_source)
2242 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2243 }
2244
2245 /* Called from main thread */
2246 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2247 pa_sink_assert_ref(s);
2248 pa_assert_ctl_context();
2249
2250 if (PA_SINK_IS_LINKED(s->state))
2251 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2252 else
2253 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2254 }
2255
2256 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2257 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2258 void *state = NULL;
2259
2260 pa_sink_assert_ref(s);
2261 pa_sink_assert_io_context(s);
2262
2263 if (max_request == s->thread_info.max_request)
2264 return;
2265
2266 s->thread_info.max_request = max_request;
2267
2268 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2269 pa_sink_input *i;
2270
2271 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2272 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2273 }
2274 }
2275
2276 /* Called from main thread */
2277 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2278 pa_sink_assert_ref(s);
2279 pa_assert_ctl_context();
2280
2281 if (PA_SINK_IS_LINKED(s->state))
2282 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2283 else
2284 pa_sink_set_max_request_within_thread(s, max_request);
2285 }
2286
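     /* Drops the cached requested latency. With dynamic=TRUE the call is a
      * no-op for sinks without PA_SINK_DYNAMIC_LATENCY, whose configured
      * latency is fixed anyway; otherwise the sink's and the inputs' update
      * hooks are still invoked. */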
2287 /* Called from IO thread */
2288 void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
2289 pa_sink_input *i;
2290 void *state = NULL;
2291
2292 pa_sink_assert_ref(s);
2293 pa_sink_assert_io_context(s);
2294
2295 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
2296 s->thread_info.requested_latency_valid = FALSE;
2297 else if (dynamic)
2298 return;
2299
2300 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2301
2302 if (s->update_requested_latency)
2303 s->update_requested_latency(s);
2304
2305 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2306 if (i->update_sink_requested_latency)
2307 i->update_sink_requested_latency(i);
2308 }
2309 }
2310
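     /* Typically a driver passes its hardware limits here once at set-up time,
      * along the lines of (hwbuf_usec being a hypothetical driver value):
      *
      *     pa_sink_set_latency_range(s, 0, hwbuf_usec);
      *
      * where 0 means "no lower limit"; out-of-range values are clamped to
      * ABSOLUTE_MIN_LATENCY/ABSOLUTE_MAX_LATENCY below. */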
2311 /* Called from main thread */
2312 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2313 pa_sink_assert_ref(s);
2314 pa_assert_ctl_context();
2315
2316 /* min_latency == 0: no limit
2317 * min_latency anything else: specified limit
2318 *
2319 * Similar for max_latency */
2320
2321 if (min_latency < ABSOLUTE_MIN_LATENCY)
2322 min_latency = ABSOLUTE_MIN_LATENCY;
2323
2324 if (max_latency <= 0 ||
2325 max_latency > ABSOLUTE_MAX_LATENCY)
2326 max_latency = ABSOLUTE_MAX_LATENCY;
2327
2328 pa_assert(min_latency <= max_latency);
2329
2330 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2331 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2332 max_latency == ABSOLUTE_MAX_LATENCY) ||
2333 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2334
2335 if (PA_SINK_IS_LINKED(s->state)) {
2336 pa_usec_t r[2];
2337
2338 r[0] = min_latency;
2339 r[1] = max_latency;
2340
2341 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2342 } else
2343 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2344 }
2345
2346 /* Called from main thread */
2347 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2348 pa_sink_assert_ref(s);
2349 pa_assert_ctl_context();
2350 pa_assert(min_latency);
2351 pa_assert(max_latency);
2352
2353 if (PA_SINK_IS_LINKED(s->state)) {
2354 pa_usec_t r[2] = { 0, 0 };
2355
2356 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2357
2358 *min_latency = r[0];
2359 *max_latency = r[1];
2360 } else {
2361 *min_latency = s->thread_info.min_latency;
2362 *max_latency = s->thread_info.max_latency;
2363 }
2364 }
2365
2366 /* Called from IO thread */
2367 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2368 pa_sink_assert_ref(s);
2369 pa_sink_assert_io_context(s);
2370
2371 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2372 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2373 pa_assert(min_latency <= max_latency);
2374
2375 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2376 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2377 max_latency == ABSOLUTE_MAX_LATENCY) ||
2378 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2379
2380 if (s->thread_info.min_latency == min_latency &&
2381 s->thread_info.max_latency == max_latency)
2382 return;
2383
2384 s->thread_info.min_latency = min_latency;
2385 s->thread_info.max_latency = max_latency;
2386
2387 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2388 pa_sink_input *i;
2389 void *state = NULL;
2390
2391 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2392 if (i->update_sink_latency_range)
2393 i->update_sink_latency_range(i);
2394 }
2395
2396 pa_sink_invalidate_requested_latency(s, FALSE);
2397
2398 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2399 }
2400
2401 /* Called from main thread */
2402 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2403 pa_sink_assert_ref(s);
2404 pa_assert_ctl_context();
2405
2406 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2407 pa_assert(latency == 0);
2408 return;
2409 }
2410
2411 if (latency < ABSOLUTE_MIN_LATENCY)
2412 latency = ABSOLUTE_MIN_LATENCY;
2413
2414 if (latency > ABSOLUTE_MAX_LATENCY)
2415 latency = ABSOLUTE_MAX_LATENCY;
2416
2417 if (PA_SINK_IS_LINKED(s->state))
2418 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2419 else
2420 s->thread_info.fixed_latency = latency;
2421
2422 pa_source_set_fixed_latency(s->monitor_source, latency);
2423 }
2424
2425 /* Called from main thread */
2426 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2427 pa_usec_t latency;
2428
2429 pa_sink_assert_ref(s);
2430 pa_assert_ctl_context();
2431
2432 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2433 return 0;
2434
2435 if (PA_SINK_IS_LINKED(s->state))
2436 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2437 else
2438 latency = s->thread_info.fixed_latency;
2439
2440 return latency;
2441 }
2442
2443 /* Called from IO thread */
2444 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
2445 pa_sink_assert_ref(s);
2446 pa_sink_assert_io_context(s);
2447
2448 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2449 pa_assert(latency == 0);
2450 return;
2451 }
2452
2453 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2454 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2455
2456 if (s->thread_info.fixed_latency == latency)
2457 return;
2458
2459 s->thread_info.fixed_latency = latency;
2460
2461 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2462 pa_sink_input *i;
2463 void *state = NULL;
2464
2465 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2466 if (i->update_sink_fixed_latency)
2467 i->update_sink_fixed_latency(i);
2468 }
2469
2470 pa_sink_invalidate_requested_latency(s, FALSE);
2471
2472 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
2473 }
2474
2475 /* Called from main context */
2476 size_t pa_sink_get_max_rewind(pa_sink *s) {
2477 size_t r;
2478 pa_sink_assert_ref(s);
2479 pa_assert_ctl_context();
2480
2481 if (!PA_SINK_IS_LINKED(s->state))
2482 return s->thread_info.max_rewind;
2483
2484 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2485
2486 return r;
2487 }
2488
2489 /* Called from main context */
2490 size_t pa_sink_get_max_request(pa_sink *s) {
2491 size_t r;
2492 pa_sink_assert_ref(s);
2493 pa_assert_ctl_context();
2494
2495 if (!PA_SINK_IS_LINKED(s->state))
2496 return s->thread_info.max_request;
2497
2498 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2499
2500 return r;
2501 }
2502
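     /* Switching ports requires the implementation to provide set_port() and
      * the requested port to exist; selecting the already active port merely
      * updates the save flag. A successful switch is announced to clients as a
      * PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE event. */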
2503 /* Called from main context */
2504 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2505 pa_device_port *port;
2506
2507 pa_sink_assert_ref(s);
2508 pa_assert_ctl_context();
2509
2510 if (!s->set_port) {
2511 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2512 return -PA_ERR_NOTIMPLEMENTED;
2513 }
2514
2515 if (!s->ports)
2516 return -PA_ERR_NOENTITY;
2517
2518 if (!(port = pa_hashmap_get(s->ports, name)))
2519 return -PA_ERR_NOENTITY;
2520
2521 if (s->active_port == port) {
2522 s->save_port = s->save_port || save;
2523 return 0;
2524 }
2525
2526 if ((s->set_port(s, port)) < 0)
2527 return -PA_ERR_NOENTITY;
2528
2529 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2530
2531 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2532
2533 s->active_port = port;
2534 s->save_port = save;
2535
2536 return 0;
2537 }
2538
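     /* Fills in PA_PROP_DEVICE_ICON_NAME, if not yet set, as
      * "<type>[-<profile suffix>][-<bus>]" -- e.g. a sink on an analog PCI
      * profile ends up as "audio-card-analog-pci". */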
2539 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2540 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2541
2542 pa_assert(p);
2543
2544 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2545 return TRUE;
2546
2547 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2548
2549 if (pa_streq(ff, "microphone"))
2550 t = "audio-input-microphone";
2551 else if (pa_streq(ff, "webcam"))
2552 t = "camera-web";
2553 else if (pa_streq(ff, "computer"))
2554 t = "computer";
2555 else if (pa_streq(ff, "handset"))
2556 t = "phone";
2557 else if (pa_streq(ff, "portable"))
2558 t = "multimedia-player";
2559 else if (pa_streq(ff, "tv"))
2560 t = "video-display";
2561
2562                     /*
2563                      * The following icons are not part of the icon naming spec;
2564                      * see the discussion at
2565                      *
2566                      * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2567                      */
2568 else if (pa_streq(ff, "headset"))
2569 t = "audio-headset";
2570 else if (pa_streq(ff, "headphone"))
2571 t = "audio-headphones";
2572 else if (pa_streq(ff, "speaker"))
2573 t = "audio-speakers";
2574 else if (pa_streq(ff, "hands-free"))
2575 t = "audio-handsfree";
2576 }
2577
2578 if (!t)
2579 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2580 if (pa_streq(c, "modem"))
2581 t = "modem";
2582
2583 if (!t) {
2584 if (is_sink)
2585 t = "audio-card";
2586 else
2587 t = "audio-input-microphone";
2588 }
2589
2590 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2591 if (strstr(profile, "analog"))
2592 s = "-analog";
2593 else if (strstr(profile, "iec958"))
2594 s = "-iec958";
2595 else if (strstr(profile, "hdmi"))
2596 s = "-hdmi";
2597 }
2598
2599 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2600
2601 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2602
2603 return TRUE;
2604 }
2605
2606 pa_bool_t pa_device_init_description(pa_proplist *p) {
2607 const char *s, *d = NULL, *k;
2608 pa_assert(p);
2609
2610 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2611 return TRUE;
2612
2613 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2614 if (pa_streq(s, "internal"))
2615 d = _("Internal Audio");
2616
2617 if (!d)
2618 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2619 if (pa_streq(s, "modem"))
2620 d = _("Modem");
2621
2622 if (!d)
2623 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2624
2625 if (!d)
2626 return FALSE;
2627
2628 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2629
2630 if (d && k)
2631 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2632 else if (d)
2633 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2634
2635 return TRUE;
2636 }
2637
2638 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2639 const char *s;
2640 pa_assert(p);
2641
2642 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2643 return TRUE;
2644
2645 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2646 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2647 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2648 return TRUE;
2649 }
2650
2651 return FALSE;
2652 }
2653
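     /* Computes an additive priority score from the device properties, e.g. an
      * internal PCI sound card with an "analog-*" profile scores
      * 9000 + 900 + 50 + 9 = 9959; a higher score presumably marks a more
      * preferable default device. */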
2654 unsigned pa_device_init_priority(pa_proplist *p) {
2655 const char *s;
2656 unsigned priority = 0;
2657
2658 pa_assert(p);
2659
2660 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
2661
2662 if (pa_streq(s, "sound"))
2663 priority += 9000;
2664 else if (!pa_streq(s, "modem"))
2665 priority += 1000;
2666 }
2667
2668 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2669
2670 if (pa_streq(s, "internal"))
2671 priority += 900;
2672 else if (pa_streq(s, "speaker"))
2673 priority += 500;
2674 else if (pa_streq(s, "headphone"))
2675 priority += 400;
2676 }
2677
2678 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
2679
2680 if (pa_streq(s, "pci"))
2681 priority += 50;
2682 else if (pa_streq(s, "usb"))
2683 priority += 40;
2684 else if (pa_streq(s, "bluetooth"))
2685 priority += 30;
2686 }
2687
2688 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2689
2690 if (pa_startswith(s, "analog-"))
2691 priority += 9;
2692 else if (pa_startswith(s, "iec958-"))
2693 priority += 8;
2694 }
2695
2696 return priority;
2697 }