[pulseaudio] / src / pulsecore / sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
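/*
 * A note on units (inferred from the PA_USEC_PER_SEC/PA_USEC_PER_MSEC
 * factors used above): these latency limits are pa_usec_t values in
 * microseconds, i.e. a 0.5 ms absolute minimum, a 10 s absolute maximum
 * and a 250 ms default for sinks without dynamic latency.
 */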
54
55 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 pa_zero(*data);
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
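/*
 * A rough sketch of how a sink implementor typically drives the
 * pa_sink_new_data helpers above (illustrative only, not taken from any
 * particular module; "my_sink", ss, map and m are assumptions):
 *
 *     pa_sink_new_data data;
 *     pa_sink *s;
 *
 *     pa_sink_new_data_init(&data);
 *     pa_sink_new_data_set_name(&data, "my_sink");
 *     pa_sink_new_data_set_sample_spec(&data, &ss);
 *     pa_sink_new_data_set_channel_map(&data, &map);
 *     data.driver = __FILE__;
 *     data.module = m;
 *     s = pa_sink_new(core, &data, PA_SINK_LATENCY);
 *     pa_sink_new_data_done(&data);
 */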
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
149
150 /* Called from main context */
151 static void reset_callbacks(pa_sink *s) {
152 pa_assert(s);
153
154 s->set_state = NULL;
155 s->get_volume = NULL;
156 s->set_volume = NULL;
157 s->get_mute = NULL;
158 s->set_mute = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
161 s->set_port = NULL;
162 }
163
164 /* Called from main context */
165 pa_sink* pa_sink_new(
166 pa_core *core,
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
169
170 pa_sink *s;
171 const char *name;
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
174 const char *dn;
175 char *pt;
176
177 pa_assert(core);
178 pa_assert(data);
179 pa_assert(data->name);
180 pa_assert_ctl_context();
181
182 s = pa_msgobject_new(pa_sink);
183
184 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
185 pa_log_debug("Failed to register name %s.", data->name);
186 pa_xfree(s);
187 return NULL;
188 }
189
190 pa_sink_new_data_set_name(data, name);
191
192 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
193 pa_xfree(s);
194 pa_namereg_unregister(core, name);
195 return NULL;
196 }
197
198 /* FIXME, need to free s here on failure */
199
200 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
201 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
202
203 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
204
205 if (!data->channel_map_is_set)
206 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
207
208 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
209 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
210
211 if (!data->volume_is_set)
212 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
213
214 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
215 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
216
217 if (!data->muted_is_set)
218 data->muted = FALSE;
219
220 if (data->card)
221 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
222
223 pa_device_init_description(data->proplist);
224 pa_device_init_icon(data->proplist, TRUE);
225 pa_device_init_intended_roles(data->proplist);
226
227 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
228 pa_xfree(s);
229 pa_namereg_unregister(core, name);
230 return NULL;
231 }
232
233 s->parent.parent.free = sink_free;
234 s->parent.process_msg = pa_sink_process_msg;
235
236 s->core = core;
237 s->state = PA_SINK_INIT;
238 s->flags = flags;
239 s->priority = 0;
240 s->suspend_cause = 0;
241 s->name = pa_xstrdup(name);
242 s->proplist = pa_proplist_copy(data->proplist);
243 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
244 s->module = data->module;
245 s->card = data->card;
246
247 s->priority = pa_device_init_priority(s->proplist);
248
249 s->sample_spec = data->sample_spec;
250 s->channel_map = data->channel_map;
251
252 s->inputs = pa_idxset_new(NULL, NULL);
253 s->n_corked = 0;
254
255 s->reference_volume = s->real_volume = data->volume;
256 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
257 s->base_volume = PA_VOLUME_NORM;
258 s->n_volume_steps = PA_VOLUME_NORM+1;
259 s->muted = data->muted;
260 s->refresh_volume = s->refresh_muted = FALSE;
261
262 reset_callbacks(s);
263 s->userdata = NULL;
264
265 s->asyncmsgq = NULL;
266
267 /* As a minor optimization we just steal the list instead of
268 * copying it here */
269 s->ports = data->ports;
270 data->ports = NULL;
271
272 s->active_port = NULL;
273 s->save_port = FALSE;
274
275 if (data->active_port && s->ports)
276 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
277 s->save_port = data->save_port;
278
279 if (!s->active_port && s->ports) {
280 void *state;
281 pa_device_port *p;
282
283 PA_HASHMAP_FOREACH(p, s->ports, state)
284 if (!s->active_port || p->priority > s->active_port->priority)
285 s->active_port = p;
286 }
287
288 s->save_volume = data->save_volume;
289 s->save_muted = data->save_muted;
290
291 pa_silence_memchunk_get(
292 &core->silence_cache,
293 core->mempool,
294 &s->silence,
295 &s->sample_spec,
296 0);
297
298 s->thread_info.rtpoll = NULL;
299 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
300 s->thread_info.soft_volume = s->soft_volume;
301 s->thread_info.soft_muted = s->muted;
302 s->thread_info.state = s->state;
303 s->thread_info.rewind_nbytes = 0;
304 s->thread_info.rewind_requested = FALSE;
305 s->thread_info.max_rewind = 0;
306 s->thread_info.max_request = 0;
307 s->thread_info.requested_latency_valid = FALSE;
308 s->thread_info.requested_latency = 0;
309 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
310 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
311 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
312
313 /* FIXME: This should probably be moved to pa_sink_put() */
314 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
315
316 if (s->card)
317 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
318
319 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
320 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
321 s->index,
322 s->name,
323 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
324 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
325 pt);
326 pa_xfree(pt);
327
328 pa_source_new_data_init(&source_data);
329 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
330 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
331 source_data.name = pa_sprintf_malloc("%s.monitor", name);
332 source_data.driver = data->driver;
333 source_data.module = data->module;
334 source_data.card = data->card;
335
336 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
337 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
338 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
339
340 s->monitor_source = pa_source_new(core, &source_data,
341 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
342 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
343
344 pa_source_new_data_done(&source_data);
345
346 if (!s->monitor_source) {
347 pa_sink_unlink(s);
348 pa_sink_unref(s);
349 return NULL;
350 }
351
352 s->monitor_source->monitor_of = s;
353
354 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
355 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
356 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
357
358 return s;
359 }
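/*
 * Between pa_sink_new() and pa_sink_put() the implementor still has to
 * finish setting the sink up; judging from the asserts in pa_sink_put()
 * below that means at least something like the following (a sketch,
 * with u/thread_mq/rtpoll being illustrative driver-side names):
 *
 *     s->userdata = u;
 *     s->set_volume = my_set_volume;           optional callbacks
 *     s->request_rewind = my_request_rewind;
 *     pa_sink_set_asyncmsgq(s, u->thread_mq.inq);
 *     pa_sink_set_rtpoll(s, u->rtpoll);
 *     pa_sink_put(s);
 */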
360
361 /* Called from main context */
362 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
363 int ret;
364 pa_bool_t suspend_change;
365 pa_sink_state_t original_state;
366
367 pa_assert(s);
368 pa_assert_ctl_context();
369
370 if (s->state == state)
371 return 0;
372
373 original_state = s->state;
374
375 suspend_change =
376 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
377 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
378
379 if (s->set_state)
380 if ((ret = s->set_state(s, state)) < 0)
381 return ret;
382
383 if (s->asyncmsgq)
384 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
385
386 if (s->set_state)
387 s->set_state(s, original_state);
388
389 return ret;
390 }
391
392 s->state = state;
393
394 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
395 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
396 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
397 }
398
399 if (suspend_change) {
400 pa_sink_input *i;
401 uint32_t idx;
402
403 /* We're suspending or resuming, tell everyone about it */
404
405 PA_IDXSET_FOREACH(i, s->inputs, idx)
406 if (s->state == PA_SINK_SUSPENDED &&
407 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
408 pa_sink_input_kill(i);
409 else if (i->suspend)
410 i->suspend(i, state == PA_SINK_SUSPENDED);
411
412 if (s->monitor_source)
413 pa_source_sync_suspend(s->monitor_source);
414 }
415
416 return 0;
417 }
418
419 /* Called from main context */
420 void pa_sink_put(pa_sink* s) {
421 pa_sink_assert_ref(s);
422 pa_assert_ctl_context();
423
424 pa_assert(s->state == PA_SINK_INIT);
425
426 /* The following fields must be initialized properly when calling _put() */
427 pa_assert(s->asyncmsgq);
428 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
429
430 /* Generally, flags should be initialized via pa_sink_new(). As a
431 * special exception we allow volume related flags to be set
432 * between _new() and _put(). */
433
434 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
435 s->flags |= PA_SINK_DECIBEL_VOLUME;
436
437 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
438 s->flags |= PA_SINK_FLAT_VOLUME;
439
440 /* We assume that if the sink implementor changed the default
441 * volume he did so in real_volume, because that is the usual
442 * place where he is supposed to place his changes. */
443 s->reference_volume = s->real_volume;
444
445 s->thread_info.soft_volume = s->soft_volume;
446 s->thread_info.soft_muted = s->muted;
447
448 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
449 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
450 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
451 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
452 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
453
454 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
455 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
456 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
457
458 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
459
460 pa_source_put(s->monitor_source);
461
462 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
463 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
464 }
465
466 /* Called from main context */
467 void pa_sink_unlink(pa_sink* s) {
468 pa_bool_t linked;
469 pa_sink_input *i, *j = NULL;
470
471 pa_assert(s);
472 pa_assert_ctl_context();
473
474 /* Please note that pa_sink_unlink() does more than simply
475 * reversing pa_sink_put(). It also undoes the registrations
476 * already done in pa_sink_new()! */
477
478 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
479 * may be called multiple times on the same sink without bad
480 * effects. */
481
482 linked = PA_SINK_IS_LINKED(s->state);
483
484 if (linked)
485 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
486
487 if (s->state != PA_SINK_UNLINKED)
488 pa_namereg_unregister(s->core, s->name);
489 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
490
491 if (s->card)
492 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
493
494 while ((i = pa_idxset_first(s->inputs, NULL))) {
495 pa_assert(i != j);
496 pa_sink_input_kill(i);
497 j = i;
498 }
499
500 if (linked)
501 sink_set_state(s, PA_SINK_UNLINKED);
502 else
503 s->state = PA_SINK_UNLINKED;
504
505 reset_callbacks(s);
506
507 if (s->monitor_source)
508 pa_source_unlink(s->monitor_source);
509
510 if (linked) {
511 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
512 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
513 }
514 }
515
516 /* Called from main context */
517 static void sink_free(pa_object *o) {
518 pa_sink *s = PA_SINK(o);
519 pa_sink_input *i;
520
521 pa_assert(s);
522 pa_assert_ctl_context();
523 pa_assert(pa_sink_refcnt(s) == 0);
524
525 if (PA_SINK_IS_LINKED(s->state))
526 pa_sink_unlink(s);
527
528 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
529
530 if (s->monitor_source) {
531 pa_source_unref(s->monitor_source);
532 s->monitor_source = NULL;
533 }
534
535 pa_idxset_free(s->inputs, NULL, NULL);
536
537 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
538 pa_sink_input_unref(i);
539
540 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
541
542 if (s->silence.memblock)
543 pa_memblock_unref(s->silence.memblock);
544
545 pa_xfree(s->name);
546 pa_xfree(s->driver);
547
548 if (s->proplist)
549 pa_proplist_free(s->proplist);
550
551 if (s->ports) {
552 pa_device_port *p;
553
554 while ((p = pa_hashmap_steal_first(s->ports)))
555 pa_device_port_free(p);
556
557 pa_hashmap_free(s->ports, NULL, NULL);
558 }
559
560 pa_xfree(s);
561 }
562
563 /* Called from main context, and not while the IO thread is active, please */
564 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
565 pa_sink_assert_ref(s);
566 pa_assert_ctl_context();
567
568 s->asyncmsgq = q;
569
570 if (s->monitor_source)
571 pa_source_set_asyncmsgq(s->monitor_source, q);
572 }
573
574 /* Called from main context, and not while the IO thread is active, please */
575 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
576 pa_sink_assert_ref(s);
577 pa_assert_ctl_context();
578
579 if (mask == 0)
580 return;
581
582 /* For now, allow only a minimal set of flags to be changed. */
583 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
584
585 s->flags = (s->flags & ~mask) | (value & mask);
586
587 pa_source_update_flags(s->monitor_source,
588 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
589 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
590 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
591 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
592 }
593
594 /* Called from IO context, or before _put() from main context */
595 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
596 pa_sink_assert_ref(s);
597 pa_sink_assert_io_context(s);
598
599 s->thread_info.rtpoll = p;
600
601 if (s->monitor_source)
602 pa_source_set_rtpoll(s->monitor_source, p);
603 }
604
605 /* Called from main context */
606 int pa_sink_update_status(pa_sink*s) {
607 pa_sink_assert_ref(s);
608 pa_assert_ctl_context();
609 pa_assert(PA_SINK_IS_LINKED(s->state));
610
611 if (s->state == PA_SINK_SUSPENDED)
612 return 0;
613
614 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
615 }
616
617 /* Called from main context */
618 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
619 pa_sink_assert_ref(s);
620 pa_assert_ctl_context();
621 pa_assert(PA_SINK_IS_LINKED(s->state));
622 pa_assert(cause != 0);
623
624 if (suspend) {
625 s->suspend_cause |= cause;
626 s->monitor_source->suspend_cause |= cause;
627 } else {
628 s->suspend_cause &= ~cause;
629 s->monitor_source->suspend_cause &= ~cause;
630 }
631
632 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
633 return 0;
634
635 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
636
637 if (s->suspend_cause)
638 return sink_set_state(s, PA_SINK_SUSPENDED);
639 else
640 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
641 }
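/*
 * The cause argument is a bit from pa_suspend_cause_t (defined outside
 * this file; PA_SUSPEND_IDLE and PA_SUSPEND_USER are examples, assuming
 * the usual set of causes). Causes accumulate, so the sink only resumes
 * once every recorded cause has been cleared again:
 *
 *     pa_sink_suspend(s, TRUE, PA_SUSPEND_IDLE);    suspend
 *     pa_sink_suspend(s, FALSE, PA_SUSPEND_IDLE);   resume, unless some
 *                                                   other cause remains
 */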
642
643 /* Called from main context */
644 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
645 pa_sink_input *i, *n;
646 uint32_t idx;
647
648 pa_sink_assert_ref(s);
649 pa_assert_ctl_context();
650 pa_assert(PA_SINK_IS_LINKED(s->state));
651
652 if (!q)
653 q = pa_queue_new();
654
655 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
656 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
657
658 pa_sink_input_ref(i);
659
660 if (pa_sink_input_start_move(i) >= 0)
661 pa_queue_push(q, i);
662 else
663 pa_sink_input_unref(i);
664 }
665
666 return q;
667 }
668
669 /* Called from main context */
670 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
671 pa_sink_input *i;
672
673 pa_sink_assert_ref(s);
674 pa_assert_ctl_context();
675 pa_assert(PA_SINK_IS_LINKED(s->state));
676 pa_assert(q);
677
678 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
679 if (pa_sink_input_finish_move(i, s, save) < 0)
680 pa_sink_input_fail_move(i);
681
682 pa_sink_input_unref(i);
683 }
684
685 pa_queue_free(q, NULL, NULL);
686 }
687
688 /* Called from main context */
689 void pa_sink_move_all_fail(pa_queue *q) {
690 pa_sink_input *i;
691
692 pa_assert_ctl_context();
693 pa_assert(q);
694
695 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
696 pa_sink_input_fail_move(i);
697 pa_sink_input_unref(i);
698 }
699
700 pa_queue_free(q, NULL, NULL);
701 }
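/*
 * The three functions above form a small protocol for moving every
 * stream off a sink, e.g. before unloading or replacing it. A hedged
 * sketch of a caller (old_sink/new_sink are hypothetical):
 *
 *     pa_queue *q = pa_sink_move_all_start(old_sink, NULL);
 *     ...tear the old sink down, set the new one up...
 *     if (new_sink)
 *         pa_sink_move_all_finish(new_sink, q, FALSE);
 *     else
 *         pa_sink_move_all_fail(q);
 */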
702
703 /* Called from IO thread context */
704 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
705 pa_sink_input *i;
706 void *state = NULL;
707
708 pa_sink_assert_ref(s);
709 pa_sink_assert_io_context(s);
710 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
711
712 /* If nobody requested a rewind and this is not a real rewind
713 * then we can short-cut it. Please note that this means that
714 * not every rewind request triggered upstream will be
715 * translated into an actual rewind! */
716 if (!s->thread_info.rewind_requested && nbytes <= 0)
717 return;
718
719 s->thread_info.rewind_nbytes = 0;
720 s->thread_info.rewind_requested = FALSE;
721
722 if (s->thread_info.state == PA_SINK_SUSPENDED)
723 return;
724
725 if (nbytes > 0)
726 pa_log_debug("Processing rewind...");
727
728 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
729 pa_sink_input_assert_ref(i);
730 pa_sink_input_process_rewind(i, nbytes);
731 }
732
733 if (nbytes > 0)
734 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
735 pa_source_process_rewind(s->monitor_source, nbytes);
736 }
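/*
 * Background on rewinds, summarized from how this function is used: a
 * rewind asks each input to regenerate up to nbytes of audio it has
 * already rendered, so that changes such as a new volume, a mute toggle
 * or an added/removed stream become audible without first playing out
 * everything that is already buffered. The core asks for one via
 * pa_sink_request_rewind(); the driver is then expected to rewind its
 * hardware buffer and call pa_sink_process_rewind() from its IO thread
 * with the number of bytes it actually managed to rewind.
 */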
737
738 /* Called from IO thread context */
739 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
740 pa_sink_input *i;
741 unsigned n = 0;
742 void *state = NULL;
743 size_t mixlength = *length;
744
745 pa_sink_assert_ref(s);
746 pa_sink_assert_io_context(s);
747 pa_assert(info);
748
749 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
750 pa_sink_input_assert_ref(i);
751
752 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
753
754 if (mixlength == 0 || info->chunk.length < mixlength)
755 mixlength = info->chunk.length;
756
757 if (pa_memblock_is_silence(info->chunk.memblock)) {
758 pa_memblock_unref(info->chunk.memblock);
759 continue;
760 }
761
762 info->userdata = pa_sink_input_ref(i);
763
764 pa_assert(info->chunk.memblock);
765 pa_assert(info->chunk.length > 0);
766
767 info++;
768 n++;
769 maxinfo--;
770 }
771
772 if (mixlength > 0)
773 *length = mixlength;
774
775 return n;
776 }
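/*
 * In short: fill_mix_info() peeks one chunk from each input (up to
 * maxinfo entries), clamps *length down to the shortest peeked chunk so
 * that all inputs can be mixed over the same span, and skips chunks
 * that are pure silence. The entries it returns are consumed and
 * released again by inputs_drop() below.
 */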
777
778 /* Called from IO thread context */
779 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
780 pa_sink_input *i;
781 void *state;
782 unsigned p = 0;
783 unsigned n_unreffed = 0;
784
785 pa_sink_assert_ref(s);
786 pa_sink_assert_io_context(s);
787 pa_assert(result);
788 pa_assert(result->memblock);
789 pa_assert(result->length > 0);
790
791 /* We optimize for the case where the order of the inputs has not changed */
792
793 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
794 unsigned j;
795 pa_mix_info* m = NULL;
796
797 pa_sink_input_assert_ref(i);
798
799 /* Let's try to find the matching entry in the pa_mix_info array */
800 for (j = 0; j < n; j ++) {
801
802 if (info[p].userdata == i) {
803 m = info + p;
804 break;
805 }
806
807 p++;
808 if (p >= n)
809 p = 0;
810 }
811
812 /* Drop read data */
813 pa_sink_input_drop(i, result->length);
814
815 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
816
817 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
818 void *ostate = NULL;
819 pa_source_output *o;
820 pa_memchunk c;
821
822 if (m && m->chunk.memblock) {
823 c = m->chunk;
824 pa_memblock_ref(c.memblock);
825 pa_assert(result->length <= c.length);
826 c.length = result->length;
827
828 pa_memchunk_make_writable(&c, 0);
829 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
830 } else {
831 c = s->silence;
832 pa_memblock_ref(c.memblock);
833 pa_assert(result->length <= c.length);
834 c.length = result->length;
835 }
836
837 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
838 pa_source_output_assert_ref(o);
839 pa_assert(o->direct_on_input == i);
840 pa_source_post_direct(s->monitor_source, o, &c);
841 }
842
843 pa_memblock_unref(c.memblock);
844 }
845 }
846
847 if (m) {
848 if (m->chunk.memblock)
849 pa_memblock_unref(m->chunk.memblock);
850 pa_memchunk_reset(&m->chunk);
851
852 pa_sink_input_unref(m->userdata);
853 m->userdata = NULL;
854
855 n_unreffed += 1;
856 }
857 }
858
859 /* Now drop references to entries that are included in the
860 * pa_mix_info array but don't exist anymore */
861
862 if (n_unreffed < n) {
863 for (; n > 0; info++, n--) {
864 if (info->userdata)
865 pa_sink_input_unref(info->userdata);
866 if (info->chunk.memblock)
867 pa_memblock_unref(info->chunk.memblock);
868 }
869 }
870
871 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
872 pa_source_post(s->monitor_source, result);
873 }
874
875 /* Called from IO thread context */
876 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
877 pa_mix_info info[MAX_MIX_CHANNELS];
878 unsigned n;
879 size_t block_size_max;
880
881 pa_sink_assert_ref(s);
882 pa_sink_assert_io_context(s);
883 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
884 pa_assert(pa_frame_aligned(length, &s->sample_spec));
885 pa_assert(result);
886
887 pa_assert(!s->thread_info.rewind_requested);
888 pa_assert(s->thread_info.rewind_nbytes == 0);
889
890 if (s->thread_info.state == PA_SINK_SUSPENDED) {
891 result->memblock = pa_memblock_ref(s->silence.memblock);
892 result->index = s->silence.index;
893 result->length = PA_MIN(s->silence.length, length);
894 return;
895 }
896
897 pa_sink_ref(s);
898
899 if (length <= 0)
900 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
901
902 block_size_max = pa_mempool_block_size_max(s->core->mempool);
903 if (length > block_size_max)
904 length = pa_frame_align(block_size_max, &s->sample_spec);
905
906 pa_assert(length > 0);
907
908 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
909
910 if (n == 0) {
911
912 *result = s->silence;
913 pa_memblock_ref(result->memblock);
914
915 if (result->length > length)
916 result->length = length;
917
918 } else if (n == 1) {
919 pa_cvolume volume;
920
921 *result = info[0].chunk;
922 pa_memblock_ref(result->memblock);
923
924 if (result->length > length)
925 result->length = length;
926
927 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
928
929 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
930 pa_memblock_unref(result->memblock);
931 pa_silence_memchunk_get(&s->core->silence_cache,
932 s->core->mempool,
933 result,
934 &s->sample_spec,
935 result->length);
936 } else if (!pa_cvolume_is_norm(&volume)) {
937 pa_memchunk_make_writable(result, 0);
938 pa_volume_memchunk(result, &s->sample_spec, &volume);
939 }
940 } else {
941 void *ptr;
942 result->memblock = pa_memblock_new(s->core->mempool, length);
943
944 ptr = pa_memblock_acquire(result->memblock);
945 result->length = pa_mix(info, n,
946 ptr, length,
947 &s->sample_spec,
948 &s->thread_info.soft_volume,
949 s->thread_info.soft_muted);
950 pa_memblock_release(result->memblock);
951
952 result->index = 0;
953 }
954
955 inputs_drop(s, info, n, result);
956
957 pa_sink_unref(s);
958 }
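/*
 * A minimal sketch of how a driver's IO thread might consume this
 * (illustrative only; fd and nbytes are assumptions, not part of this
 * file):
 *
 *     pa_memchunk chunk;
 *     void *p;
 *
 *     pa_sink_render(s, nbytes, &chunk);
 *     p = pa_memblock_acquire(chunk.memblock);
 *     write(fd, (uint8_t*) p + chunk.index, chunk.length);
 *     pa_memblock_release(chunk.memblock);
 *     pa_memblock_unref(chunk.memblock);
 */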
959
960 /* Called from IO thread context */
961 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
962 pa_mix_info info[MAX_MIX_CHANNELS];
963 unsigned n;
964 size_t length, block_size_max;
965
966 pa_sink_assert_ref(s);
967 pa_sink_assert_io_context(s);
968 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
969 pa_assert(target);
970 pa_assert(target->memblock);
971 pa_assert(target->length > 0);
972 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
973
974 pa_assert(!s->thread_info.rewind_requested);
975 pa_assert(s->thread_info.rewind_nbytes == 0);
976
977 if (s->thread_info.state == PA_SINK_SUSPENDED) {
978 pa_silence_memchunk(target, &s->sample_spec);
979 return;
980 }
981
982 pa_sink_ref(s);
983
984 length = target->length;
985 block_size_max = pa_mempool_block_size_max(s->core->mempool);
986 if (length > block_size_max)
987 length = pa_frame_align(block_size_max, &s->sample_spec);
988
989 pa_assert(length > 0);
990
991 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
992
993 if (n == 0) {
994 if (target->length > length)
995 target->length = length;
996
997 pa_silence_memchunk(target, &s->sample_spec);
998 } else if (n == 1) {
999 pa_cvolume volume;
1000
1001 if (target->length > length)
1002 target->length = length;
1003
1004 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1005
1006 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1007 pa_silence_memchunk(target, &s->sample_spec);
1008 else {
1009 pa_memchunk vchunk;
1010
1011 vchunk = info[0].chunk;
1012 pa_memblock_ref(vchunk.memblock);
1013
1014 if (vchunk.length > length)
1015 vchunk.length = length;
1016
1017 if (!pa_cvolume_is_norm(&volume)) {
1018 pa_memchunk_make_writable(&vchunk, 0);
1019 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1020 }
1021
1022 pa_memchunk_memcpy(target, &vchunk);
1023 pa_memblock_unref(vchunk.memblock);
1024 }
1025
1026 } else {
1027 void *ptr;
1028
1029 ptr = pa_memblock_acquire(target->memblock);
1030
1031 target->length = pa_mix(info, n,
1032 (uint8_t*) ptr + target->index, length,
1033 &s->sample_spec,
1034 &s->thread_info.soft_volume,
1035 s->thread_info.soft_muted);
1036
1037 pa_memblock_release(target->memblock);
1038 }
1039
1040 inputs_drop(s, info, n, target);
1041
1042 pa_sink_unref(s);
1043 }
1044
1045 /* Called from IO thread context */
1046 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1047 pa_memchunk chunk;
1048 size_t l, d;
1049
1050 pa_sink_assert_ref(s);
1051 pa_sink_assert_io_context(s);
1052 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1053 pa_assert(target);
1054 pa_assert(target->memblock);
1055 pa_assert(target->length > 0);
1056 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1057
1058 pa_assert(!s->thread_info.rewind_requested);
1059 pa_assert(s->thread_info.rewind_nbytes == 0);
1060
1061 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1062 pa_silence_memchunk(target, &s->sample_spec);
1063 return;
1064 }
1065
1066 pa_sink_ref(s);
1067
1068 l = target->length;
1069 d = 0;
1070 while (l > 0) {
1071 chunk = *target;
1072 chunk.index += d;
1073 chunk.length -= d;
1074
1075 pa_sink_render_into(s, &chunk);
1076
1077 d += chunk.length;
1078 l -= chunk.length;
1079 }
1080
1081 pa_sink_unref(s);
1082 }
1083
1084 /* Called from IO thread context */
1085 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1086 pa_sink_assert_ref(s);
1087 pa_sink_assert_io_context(s);
1088 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1089 pa_assert(length > 0);
1090 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1091 pa_assert(result);
1092
1093 pa_assert(!s->thread_info.rewind_requested);
1094 pa_assert(s->thread_info.rewind_nbytes == 0);
1095
1096 pa_sink_ref(s);
1097
1098 pa_sink_render(s, length, result);
1099
1100 if (result->length < length) {
1101 pa_memchunk chunk;
1102
1103 pa_memchunk_make_writable(result, length);
1104
1105 chunk.memblock = result->memblock;
1106 chunk.index = result->index + result->length;
1107 chunk.length = length - result->length;
1108
1109 pa_sink_render_into_full(s, &chunk);
1110
1111 result->length = length;
1112 }
1113
1114 pa_sink_unref(s);
1115 }
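/*
 * The four render entry points above differ only in how strict they are
 * about the amount of data produced:
 *
 *     pa_sink_render()           returns a chunk of at most `length' bytes,
 *                                possibly referencing an input's or the
 *                                silence memblock directly
 *     pa_sink_render_into()      mixes into the caller's memblock, but may
 *                                shrink target->length
 *     pa_sink_render_into_full() fills exactly target->length bytes by
 *                                looping over pa_sink_render_into()
 *     pa_sink_render_full()      returns exactly `length' bytes, topping up
 *                                a short pa_sink_render() result
 */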
1116
1117 /* Called from main thread */
1118 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1119 pa_usec_t usec = 0;
1120
1121 pa_sink_assert_ref(s);
1122 pa_assert_ctl_context();
1123 pa_assert(PA_SINK_IS_LINKED(s->state));
1124
1125 /* The returned value is supposed to be in the time domain of the sound card! */
1126
1127 if (s->state == PA_SINK_SUSPENDED)
1128 return 0;
1129
1130 if (!(s->flags & PA_SINK_LATENCY))
1131 return 0;
1132
1133 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1134
1135 return usec;
1136 }
1137
1138 /* Called from IO thread */
1139 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1140 pa_usec_t usec = 0;
1141 pa_msgobject *o;
1142
1143 pa_sink_assert_ref(s);
1144 pa_sink_assert_io_context(s);
1145 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1146
1147 /* The returned value is supposed to be in the time domain of the sound card! */
1148
1149 if (s->thread_info.state == PA_SINK_SUSPENDED)
1150 return 0;
1151
1152 if (!(s->flags & PA_SINK_LATENCY))
1153 return 0;
1154
1155 o = PA_MSGOBJECT(s);
1156
1157 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1158
1159 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1160 return -1;
1161
1162 return usec;
1163 }
1164
1165 /* Called from main context */
1166 static void compute_reference_ratios(pa_sink *s) {
1167 uint32_t idx;
1168 pa_sink_input *i;
1169
1170 pa_sink_assert_ref(s);
1171 pa_assert_ctl_context();
1172 pa_assert(PA_SINK_IS_LINKED(s->state));
1173 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1174
1175 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1176 unsigned c;
1177 pa_cvolume remapped;
1178
1179 /*
1180 * Calculates the stream's reference ratio from its volume and
1181 * the sink's reference volume. This basically calculates:
1182 *
1183 * i->reference_ratio = i->volume / s->reference_volume
1184 */
1185
1186 remapped = s->reference_volume;
1187 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1188
1189 i->reference_ratio.channels = i->sample_spec.channels;
1190
1191 for (c = 0; c < i->sample_spec.channels; c++) {
1192
1193 /* We don't update when the sink volume is 0 anyway */
1194 if (remapped.values[c] <= PA_VOLUME_MUTED)
1195 continue;
1196
1197 /* Don't update the reference ratio unless necessary */
1198 if (pa_sw_volume_multiply(
1199 i->reference_ratio.values[c],
1200 remapped.values[c]) == i->volume.values[c])
1201 continue;
1202
1203 i->reference_ratio.values[c] = pa_sw_volume_divide(
1204 i->volume.values[c],
1205 remapped.values[c]);
1206 }
1207 }
1208 }
1209
1210 /* Called from main context */
1211 static void compute_real_ratios(pa_sink *s) {
1212 pa_sink_input *i;
1213 uint32_t idx;
1214
1215 pa_sink_assert_ref(s);
1216 pa_assert_ctl_context();
1217 pa_assert(PA_SINK_IS_LINKED(s->state));
1218 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1219
1220 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1221 unsigned c;
1222 pa_cvolume remapped;
1223
1224 /*
1225 * This basically calculates:
1226 *
1227 * i->real_ratio := i->volume / s->real_volume
1228 * i->soft_volume := i->real_ratio * i->volume_factor
1229 */
1230
1231 remapped = s->real_volume;
1232 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1233
1234 i->real_ratio.channels = i->sample_spec.channels;
1235 i->soft_volume.channels = i->sample_spec.channels;
1236
1237 for (c = 0; c < i->sample_spec.channels; c++) {
1238
1239 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1240 /* We leave i->real_ratio untouched */
1241 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1242 continue;
1243 }
1244
1245 /* Don't lose accuracy unless necessary */
1246 if (pa_sw_volume_multiply(
1247 i->real_ratio.values[c],
1248 remapped.values[c]) != i->volume.values[c])
1249
1250 i->real_ratio.values[c] = pa_sw_volume_divide(
1251 i->volume.values[c],
1252 remapped.values[c]);
1253
1254 i->soft_volume.values[c] = pa_sw_volume_multiply(
1255 i->real_ratio.values[c],
1256 i->volume_factor.values[c]);
1257 }
1258
1259 /* We don't copy the soft_volume to the thread_info data
1260 * here. That must be done by the caller */
1261 }
1262 }
1263
1264 /* Called from main thread */
1265 static void compute_real_volume(pa_sink *s) {
1266 pa_sink_input *i;
1267 uint32_t idx;
1268
1269 pa_sink_assert_ref(s);
1270 pa_assert_ctl_context();
1271 pa_assert(PA_SINK_IS_LINKED(s->state));
1272 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1273
1274 /* This determines the maximum volume of all streams and sets
1275 * s->real_volume accordingly. */
1276
1277 if (pa_idxset_isempty(s->inputs)) {
1278 /* In the special case that we have no sink input we leave the
1279 * volume unmodified. */
1280 s->real_volume = s->reference_volume;
1281 return;
1282 }
1283
1284 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1285
1286 /* First let's determine the new maximum volume of all inputs
1287 * connected to this sink */
1288 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1289 pa_cvolume remapped;
1290
1291 remapped = i->volume;
1292 pa_cvolume_remap(&remapped, &i->channel_map, &s->channel_map);
1293 pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
1294 }
1295
1296 /* Then, let's update the real ratios/soft volumes of all inputs
1297 * connected to this sink */
1298 compute_real_ratios(s);
1299 }
1300
1301 /* Called from main thread */
1302 static void propagate_reference_volume(pa_sink *s) {
1303 pa_sink_input *i;
1304 uint32_t idx;
1305
1306 pa_sink_assert_ref(s);
1307 pa_assert_ctl_context();
1308 pa_assert(PA_SINK_IS_LINKED(s->state));
1309 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1310
1311 /* This is called whenever the sink volume changes for a reason
1312 * other than a sink input volume change. We need to fix up the
1313 * sink input volumes accordingly */
1314
1315 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1316 pa_cvolume old_volume, remapped;
1317
1318 old_volume = i->volume;
1319
1320 /* This basically calculates:
1321 *
1322 * i->volume := s->reference_volume * i->reference_ratio */
1323
1324 remapped = s->reference_volume;
1325 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1326 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1327
1328 /* The volume changed, let's tell people so */
1329 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1330
1331 if (i->volume_changed)
1332 i->volume_changed(i);
1333
1334 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1335 }
1336 }
1337 }
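/*
 * Flat-volume bookkeeping in one place (summarized from the functions
 * above; the individual comments hold the authoritative math):
 *
 *     s->reference_volume   what clients see and set
 *     s->real_volume        what the device is actually driven with; with
 *                           PA_SINK_FLAT_VOLUME it is the maximum of all
 *                           input volumes
 *     i->reference_ratio  = i->volume / s->reference_volume
 *     i->real_ratio       = i->volume / s->real_volume
 *     i->soft_volume      = i->real_ratio * i->volume_factor
 *     i->volume           = s->reference_volume * i->reference_ratio
 *                           (re-derived when the sink volume changes)
 */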
1338
1339 /* Called from main thread */
1340 void pa_sink_set_volume(
1341 pa_sink *s,
1342 const pa_cvolume *volume,
1343 pa_bool_t send_msg,
1344 pa_bool_t save) {
1345
1346 pa_cvolume old_reference_volume;
1347 pa_bool_t reference_changed;
1348
1349 pa_sink_assert_ref(s);
1350 pa_assert_ctl_context();
1351 pa_assert(PA_SINK_IS_LINKED(s->state));
1352 pa_assert(!volume || pa_cvolume_valid(volume));
1353 pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));
1354 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1355
1356 /* As a special exception we accept mono volumes on all sinks --
1357 * even on those with more complex channel maps */
1358
1359 /* If volume is NULL we synchronize the sink's real and reference
1360 * volumes with the stream volumes. If it is not NULL we update
1361 * the reference_volume with it. */
1362
1363 old_reference_volume = s->reference_volume;
1364
1365 if (volume) {
1366
1367 if (pa_cvolume_compatible(volume, &s->sample_spec))
1368 s->reference_volume = *volume;
1369 else
1370 pa_cvolume_scale(&s->reference_volume, pa_cvolume_max(volume));
1371
1372 if (s->flags & PA_SINK_FLAT_VOLUME) {
1373 /* OK, propagate this volume change back to the inputs */
1374 propagate_reference_volume(s);
1375
1376 /* And now recalculate the real volume */
1377 compute_real_volume(s);
1378 } else
1379 s->real_volume = s->reference_volume;
1380
1381 } else {
1382 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1383
1384 /* Ok, let's determine the new real volume */
1385 compute_real_volume(s);
1386
1387 /* Let's 'push' the reference volume if necessary */
1388 pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);
1389
1390 /* We need to fix the reference ratios of all streams now that
1391 * we changed the reference volume */
1392 compute_reference_ratios(s);
1393 }
1394
1395 reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
1396 s->save_volume = (!reference_changed && s->save_volume) || save;
1397
1398 if (s->set_volume) {
1399 /* If we have a function set_volume(), then we do not apply a
1400 * soft volume by default. However, set_volume() is free to
1401 * apply one to s->soft_volume */
1402
1403 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1404 s->set_volume(s);
1405
1406 } else
1407 /* If we have no function set_volume(), then the soft volume
1408 * becomes the virtual volume */
1409 s->soft_volume = s->real_volume;
1410
1411 /* This tells the sink that soft and/or virtual volume changed */
1412 if (send_msg)
1413 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1414
1415 if (reference_changed)
1416 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1417 }
1418
1419 /* Called from main thread. Only to be called by sink implementor */
1420 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1421 pa_sink_assert_ref(s);
1422 pa_assert_ctl_context();
1423
1424 if (!volume)
1425 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1426 else
1427 s->soft_volume = *volume;
1428
1429 if (PA_SINK_IS_LINKED(s->state))
1430 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1431 else
1432 s->thread_info.soft_volume = s->soft_volume;
1433 }
1434
1435 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
1436 pa_sink_input *i;
1437 uint32_t idx;
1438 pa_cvolume old_reference_volume;
1439
1440 pa_sink_assert_ref(s);
1441 pa_assert_ctl_context();
1442 pa_assert(PA_SINK_IS_LINKED(s->state));
1443
1444 /* This is called when the hardware's real volume changes due to
1445 * some external event. We copy the real volume into our
1446 * reference volume and then rebuild the stream volumes based on
1447 * i->real_ratio which should stay fixed. */
1448
1449 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1450 return;
1451
1452 old_reference_volume = s->reference_volume;
1453
1454 /* 1. Make the real volume the reference volume */
1455 s->reference_volume = s->real_volume;
1456
1457 if (s->flags & PA_SINK_FLAT_VOLUME) {
1458
1459 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1460 pa_cvolume old_volume, remapped;
1461
1462 old_volume = i->volume;
1463
1464 /* 2. Since the sink's reference and real volumes are equal
1465 * now our ratios should be too. */
1466 i->reference_ratio = i->real_ratio;
1467
1468 /* 3. Recalculate the new stream reference volume based on the
1469 * reference ratio and the sink's reference volume.
1470 *
1471 * This basically calculates:
1472 *
1473 * i->volume = s->reference_volume * i->reference_ratio
1474 *
1475 * This is identical to propagate_reference_volume() */
1476 remapped = s->reference_volume;
1477 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1478 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1479
1480 /* Notify if something changed */
1481 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1482
1483 if (i->volume_changed)
1484 i->volume_changed(i);
1485
1486 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1487 }
1488 }
1489 }
1490
1491 /* Something changed in the hardware. It probably makes sense to
1492 * save the changed hw settings, given that hw volume changes not
1493 * triggered by PA are almost certainly done by the user. */
1494 s->save_volume = TRUE;
1495
1496 if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
1497 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1498 }
1499
1500 /* Called from main thread */
1501 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1502 pa_sink_assert_ref(s);
1503 pa_assert_ctl_context();
1504 pa_assert(PA_SINK_IS_LINKED(s->state));
1505
1506 if (s->refresh_volume || force_refresh) {
1507 struct pa_cvolume old_real_volume;
1508
1509 old_real_volume = s->real_volume;
1510
1511 if (s->get_volume)
1512 s->get_volume(s);
1513
1514 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1515
1516 propagate_real_volume(s, &old_real_volume);
1517 }
1518
1519 return &s->reference_volume;
1520 }
1521
1522 /* Called from main thread */
1523 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1524 pa_cvolume old_real_volume;
1525
1526 pa_sink_assert_ref(s);
1527 pa_assert_ctl_context();
1528 pa_assert(PA_SINK_IS_LINKED(s->state));
1529
1530 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1531
1532 old_real_volume = s->real_volume;
1533 s->real_volume = *new_real_volume;
1534
1535 propagate_real_volume(s, &old_real_volume);
1536 }
1537
1538 /* Called from main thread */
1539 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1540 pa_bool_t old_muted;
1541
1542 pa_sink_assert_ref(s);
1543 pa_assert_ctl_context();
1544 pa_assert(PA_SINK_IS_LINKED(s->state));
1545
1546 old_muted = s->muted;
1547 s->muted = mute;
1548 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1549
1550 if (s->set_mute)
1551 s->set_mute(s);
1552
1553 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1554
1555 if (old_muted != s->muted)
1556 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1557 }
1558
1559 /* Called from main thread */
1560 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1561
1562 pa_sink_assert_ref(s);
1563 pa_assert_ctl_context();
1564 pa_assert(PA_SINK_IS_LINKED(s->state));
1565
1566 if (s->refresh_muted || force_refresh) {
1567 pa_bool_t old_muted = s->muted;
1568
1569 if (s->get_mute)
1570 s->get_mute(s);
1571
1572 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1573
1574 if (old_muted != s->muted) {
1575 s->save_muted = TRUE;
1576
1577 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1578
1579 /* Make sure the soft mute status stays in sync */
1580 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1581 }
1582 }
1583
1584 return s->muted;
1585 }
1586
1587 /* Called from main thread */
1588 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1589 pa_sink_assert_ref(s);
1590 pa_assert_ctl_context();
1591 pa_assert(PA_SINK_IS_LINKED(s->state));
1592
1593 /* The sink implementor may call this if the mute status changed to make sure everyone is notified */
1594
1595 if (s->muted == new_muted)
1596 return;
1597
1598 s->muted = new_muted;
1599 s->save_muted = TRUE;
1600
1601 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1602 }
1603
1604 /* Called from main thread */
1605 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1606 pa_sink_assert_ref(s);
1607 pa_assert_ctl_context();
1608
1609 if (p)
1610 pa_proplist_update(s->proplist, mode, p);
1611
1612 if (PA_SINK_IS_LINKED(s->state)) {
1613 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1614 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1615 }
1616
1617 return TRUE;
1618 }
1619
1620 /* Called from main thread */
1621 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1622 void pa_sink_set_description(pa_sink *s, const char *description) {
1623 const char *old;
1624 pa_sink_assert_ref(s);
1625 pa_assert_ctl_context();
1626
1627 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1628 return;
1629
1630 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1631
1632 if (old && description && pa_streq(old, description))
1633 return;
1634
1635 if (description)
1636 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1637 else
1638 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1639
1640 if (s->monitor_source) {
1641 char *n;
1642
1643 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1644 pa_source_set_description(s->monitor_source, n);
1645 pa_xfree(n);
1646 }
1647
1648 if (PA_SINK_IS_LINKED(s->state)) {
1649 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1650 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1651 }
1652 }
1653
1654 /* Called from main thread */
1655 unsigned pa_sink_linked_by(pa_sink *s) {
1656 unsigned ret;
1657
1658 pa_sink_assert_ref(s);
1659 pa_assert_ctl_context();
1660 pa_assert(PA_SINK_IS_LINKED(s->state));
1661
1662 ret = pa_idxset_size(s->inputs);
1663
1664 /* We add in the number of streams connected to us here. Please
1665 * note the asymmetry to pa_sink_used_by()! */
1666
1667 if (s->monitor_source)
1668 ret += pa_source_linked_by(s->monitor_source);
1669
1670 return ret;
1671 }
1672
1673 /* Called from main thread */
1674 unsigned pa_sink_used_by(pa_sink *s) {
1675 unsigned ret;
1676
1677 pa_sink_assert_ref(s);
1678 pa_assert_ctl_context();
1679 pa_assert(PA_SINK_IS_LINKED(s->state));
1680
1681 ret = pa_idxset_size(s->inputs);
1682 pa_assert(ret >= s->n_corked);
1683
1684 /* Streams connected to our monitor source do not matter for
1685 * pa_sink_used_by()! */
1686
1687 return ret - s->n_corked;
1688 }
1689
1690 /* Called from main thread */
1691 unsigned pa_sink_check_suspend(pa_sink *s) {
1692 unsigned ret;
1693 pa_sink_input *i;
1694 uint32_t idx;
1695
1696 pa_sink_assert_ref(s);
1697 pa_assert_ctl_context();
1698
1699 if (!PA_SINK_IS_LINKED(s->state))
1700 return 0;
1701
1702 ret = 0;
1703
1704 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1705 pa_sink_input_state_t st;
1706
1707 st = pa_sink_input_get_state(i);
1708 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1709
1710 if (st == PA_SINK_INPUT_CORKED)
1711 continue;
1712
1713 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1714 continue;
1715
1716 ret ++;
1717 }
1718
1719 if (s->monitor_source)
1720 ret += pa_source_check_suspend(s->monitor_source);
1721
1722 return ret;
1723 }
1724
1725 /* Called from the IO thread */
1726 static void sync_input_volumes_within_thread(pa_sink *s) {
1727 pa_sink_input *i;
1728 void *state = NULL;
1729
1730 pa_sink_assert_ref(s);
1731 pa_sink_assert_io_context(s);
1732
1733 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1734 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1735 continue;
1736
1737 i->thread_info.soft_volume = i->soft_volume;
1738 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1739 }
1740 }
1741
1742 /* Called from IO thread, except when it is not */
1743 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1744 pa_sink *s = PA_SINK(o);
1745 pa_sink_assert_ref(s);
1746
1747 switch ((pa_sink_message_t) code) {
1748
1749 case PA_SINK_MESSAGE_ADD_INPUT: {
1750 pa_sink_input *i = PA_SINK_INPUT(userdata);
1751
1752 /* If you change anything here, make sure to change the
1753 * sink input handling a few lines down at
1754 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1755
1756 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1757
1758 /* Since the caller sleeps in pa_sink_input_put(), we can
1759 * safely access data outside of thread_info even though
1760 * it is mutable */
1761
1762 if ((i->thread_info.sync_prev = i->sync_prev)) {
1763 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1764 pa_assert(i->sync_prev->sync_next == i);
1765 i->thread_info.sync_prev->thread_info.sync_next = i;
1766 }
1767
1768 if ((i->thread_info.sync_next = i->sync_next)) {
1769 pa_assert(i->sink == i->thread_info.sync_next->sink);
1770 pa_assert(i->sync_next->sync_prev == i);
1771 i->thread_info.sync_next->thread_info.sync_prev = i;
1772 }
1773
1774 pa_assert(!i->thread_info.attached);
1775 i->thread_info.attached = TRUE;
1776
1777 if (i->attach)
1778 i->attach(i);
1779
1780 pa_sink_input_set_state_within_thread(i, i->state);
1781
1782 /* The requested latency of the sink input needs to be
1783 * fixed up and then configured on the sink */
1784
1785 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1786 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1787
1788 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1789 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1790
1791 /* We don't rewind here automatically. This is left to the
1792 * sink input implementor because some sink inputs need a
1793 * slow start, i.e. need some time to buffer client
1794 * samples before beginning streaming. */
1795
1796 /* In flat volume mode we need to update the volume as
1797 * well */
1798 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1799 }
1800
1801 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1802 pa_sink_input *i = PA_SINK_INPUT(userdata);
1803
1804 /* If you change anything here, make sure to change the
1805 * sink input handling a few lines down at
1806 * PA_SINK_MESSAGE_START_MOVE, too. */
1807
1808 if (i->detach)
1809 i->detach(i);
1810
1811 pa_sink_input_set_state_within_thread(i, i->state);
1812
1813 pa_assert(i->thread_info.attached);
1814 i->thread_info.attached = FALSE;
1815
1816 /* Since the caller sleeps in pa_sink_input_unlink(),
1817 * we can safely access data outside of thread_info even
1818 * though it is mutable */
1819
1820 pa_assert(!i->sync_prev);
1821 pa_assert(!i->sync_next);
1822
1823 if (i->thread_info.sync_prev) {
1824 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1825 i->thread_info.sync_prev = NULL;
1826 }
1827
1828 if (i->thread_info.sync_next) {
1829 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1830 i->thread_info.sync_next = NULL;
1831 }
1832
1833 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1834 pa_sink_input_unref(i);
1835
1836 pa_sink_invalidate_requested_latency(s, TRUE);
1837 pa_sink_request_rewind(s, (size_t) -1);
1838
1839 /* In flat volume mode we need to update the volume as
1840 * well */
1841 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1842 }
1843
1844 case PA_SINK_MESSAGE_START_MOVE: {
1845 pa_sink_input *i = PA_SINK_INPUT(userdata);
1846
1847 /* We don't support moving synchronized streams. */
1848 pa_assert(!i->sync_prev);
1849 pa_assert(!i->sync_next);
1850 pa_assert(!i->thread_info.sync_next);
1851 pa_assert(!i->thread_info.sync_prev);
1852
1853 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1854 pa_usec_t usec = 0;
1855 size_t sink_nbytes, total_nbytes;
1856
1857 /* Get the latency of the sink */
1858 usec = pa_sink_get_latency_within_thread(s);
1859 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1860 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1861
1862 if (total_nbytes > 0) {
1863 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1864 i->thread_info.rewrite_flush = TRUE;
1865 pa_sink_input_process_rewind(i, sink_nbytes);
1866 }
1867 }
1868
1869 if (i->detach)
1870 i->detach(i);
1871
1872 pa_assert(i->thread_info.attached);
1873 i->thread_info.attached = FALSE;
1874
1875 /* Let's remove the sink input ...*/
1876 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1877 pa_sink_input_unref(i);
1878
1879 pa_sink_invalidate_requested_latency(s, TRUE);
1880
1881 pa_log_debug("Requesting rewind due to started move");
1882 pa_sink_request_rewind(s, (size_t) -1);
1883
1884 /* In flat volume mode we need to update the volume as
1885 * well */
1886 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1887 }
1888
1889 case PA_SINK_MESSAGE_FINISH_MOVE: {
1890 pa_sink_input *i = PA_SINK_INPUT(userdata);
1891
1892 /* We don't support moving synchronized streams. */
1893 pa_assert(!i->sync_prev);
1894 pa_assert(!i->sync_next);
1895 pa_assert(!i->thread_info.sync_next);
1896 pa_assert(!i->thread_info.sync_prev);
1897
1898 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1899
1900 pa_assert(!i->thread_info.attached);
1901 i->thread_info.attached = TRUE;
1902
1903 if (i->attach)
1904 i->attach(i);
1905
1906 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1907 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1908
1909 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1910 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1911
1912 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1913 pa_usec_t usec = 0;
1914 size_t nbytes;
1915
1916 /* Get the latency of the sink */
1917 usec = pa_sink_get_latency_within_thread(s);
1918 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1919
1920 if (nbytes > 0)
1921 pa_sink_input_drop(i, nbytes);
1922
1923 pa_log_debug("Requesting rewind due to finished move");
1924 pa_sink_request_rewind(s, nbytes);
1925 }
1926
1927 /* In flat volume mode we need to update the volume as
1928 * well */
1929 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1930 }
1931
1932 case PA_SINK_MESSAGE_SET_VOLUME:
1933
1934 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1935 s->thread_info.soft_volume = s->soft_volume;
1936 pa_sink_request_rewind(s, (size_t) -1);
1937 }
1938
1939 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1940 return 0;
1941
1942 /* Fall through ... */
1943
1944 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1945 sync_input_volumes_within_thread(s);
1946 return 0;
1947
1948 case PA_SINK_MESSAGE_GET_VOLUME:
1949 return 0;
1950
1951 case PA_SINK_MESSAGE_SET_MUTE:
1952
1953 if (s->thread_info.soft_muted != s->muted) {
1954 s->thread_info.soft_muted = s->muted;
1955 pa_sink_request_rewind(s, (size_t) -1);
1956 }
1957
1958 return 0;
1959
1960 case PA_SINK_MESSAGE_GET_MUTE:
1961 return 0;
1962
1963 case PA_SINK_MESSAGE_SET_STATE: {
1964
1965 pa_bool_t suspend_change =
1966 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1967 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1968
1969 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1970
1971 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1972 s->thread_info.rewind_nbytes = 0;
1973 s->thread_info.rewind_requested = FALSE;
1974 }
1975
1976 if (suspend_change) {
1977 pa_sink_input *i;
1978 void *state = NULL;
1979
1980 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1981 if (i->suspend_within_thread)
1982 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1983 }
1984
1985 return 0;
1986 }
1987
1988 case PA_SINK_MESSAGE_DETACH:
1989
1990 /* Detach all streams */
1991 pa_sink_detach_within_thread(s);
1992 return 0;
1993
1994 case PA_SINK_MESSAGE_ATTACH:
1995
1996 /* Reattach all streams */
1997 pa_sink_attach_within_thread(s);
1998 return 0;
1999
2000 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2001
2002 pa_usec_t *usec = userdata;
2003 *usec = pa_sink_get_requested_latency_within_thread(s);
2004
2005             /* Note: the IO thread sees -1 when no explicit requested
2006              * latency is configured, whereas the main thread sees
2007              * max_latency */
2008 if (*usec == (pa_usec_t) -1)
2009 *usec = s->thread_info.max_latency;
2010
2011 return 0;
2012 }
2013
2014 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2015 pa_usec_t *r = userdata;
2016
2017 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2018
2019 return 0;
2020 }
2021
2022 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2023 pa_usec_t *r = userdata;
2024
2025 r[0] = s->thread_info.min_latency;
2026 r[1] = s->thread_info.max_latency;
2027
2028 return 0;
2029 }
2030
2031 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2032
2033 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2034 return 0;
2035
2036 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2037
2038 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2039 return 0;
2040
2041 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2042
2043 *((size_t*) userdata) = s->thread_info.max_rewind;
2044 return 0;
2045
2046 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2047
2048 *((size_t*) userdata) = s->thread_info.max_request;
2049 return 0;
2050
2051 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2052
2053 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2054 return 0;
2055
2056 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2057
2058 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2059 return 0;
2060
2061 case PA_SINK_MESSAGE_GET_LATENCY:
2062 case PA_SINK_MESSAGE_MAX:
2063 ;
2064 }
2065
2066 return -1;
2067 }
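
/* Illustrative sketch (not part of this file): a sink implementation's own
 * process_msg callback usually handles its device-specific messages first and
 * falls back to pa_sink_process_msg() for everything handled above. Note that
 * PA_SINK_MESSAGE_GET_LATENCY is deliberately not handled here, so drivers are
 * expected to answer it themselves. The names my_sink_process_msg, struct
 * userdata and u->latency are hypothetical.
 *
 *   static int my_sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
 *       struct userdata *u = PA_SINK(o)->userdata;
 *
 *       switch (code) {
 *           case PA_SINK_MESSAGE_GET_LATENCY:
 *               // Report the device latency; here a hypothetical cached value.
 *               *((pa_usec_t*) data) = u->latency;
 *               return 0;
 *       }
 *
 *       return pa_sink_process_msg(o, code, data, offset, chunk);
 *   }
 */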
2068
2069 /* Called from main thread */
2070 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2071 pa_sink *sink;
2072 uint32_t idx;
2073 int ret = 0;
2074
2075 pa_core_assert_ref(c);
2076 pa_assert_ctl_context();
2077 pa_assert(cause != 0);
2078
2079 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2080 int r;
2081
2082 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2083 ret = r;
2084 }
2085
2086 return ret;
2087 }
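
/* Usage sketch (illustrative, not part of this file): a module or the core can
 * suspend every sink at once, e.g. in reaction to a user request. PA_SUSPEND_USER
 * is one of the pa_suspend_cause_t flags; error handling is kept minimal.
 *
 *   if (pa_sink_suspend_all(c, TRUE, PA_SUSPEND_USER) < 0)
 *       pa_log_warn("Failed to suspend all sinks.");
 */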
2088
2089 /* Called from main thread */
2090 void pa_sink_detach(pa_sink *s) {
2091 pa_sink_assert_ref(s);
2092 pa_assert_ctl_context();
2093 pa_assert(PA_SINK_IS_LINKED(s->state));
2094
2095 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2096 }
2097
2098 /* Called from main thread */
2099 void pa_sink_attach(pa_sink *s) {
2100 pa_sink_assert_ref(s);
2101 pa_assert_ctl_context();
2102 pa_assert(PA_SINK_IS_LINKED(s->state));
2103
2104 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2105 }
2106
2107 /* Called from IO thread */
2108 void pa_sink_detach_within_thread(pa_sink *s) {
2109 pa_sink_input *i;
2110 void *state = NULL;
2111
2112 pa_sink_assert_ref(s);
2113 pa_sink_assert_io_context(s);
2114 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2115
2116 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2117 if (i->detach)
2118 i->detach(i);
2119
2120 if (s->monitor_source)
2121 pa_source_detach_within_thread(s->monitor_source);
2122 }
2123
2124 /* Called from IO thread */
2125 void pa_sink_attach_within_thread(pa_sink *s) {
2126 pa_sink_input *i;
2127 void *state = NULL;
2128
2129 pa_sink_assert_ref(s);
2130 pa_sink_assert_io_context(s);
2131 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2132
2133 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2134 if (i->attach)
2135 i->attach(i);
2136
2137 if (s->monitor_source)
2138 pa_source_attach_within_thread(s->monitor_source);
2139 }
2140
2141 /* Called from IO thread */
2142 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2143 pa_sink_assert_ref(s);
2144 pa_sink_assert_io_context(s);
2145 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2146
2147 if (s->thread_info.state == PA_SINK_SUSPENDED)
2148 return;
2149
2150 if (nbytes == (size_t) -1)
2151 nbytes = s->thread_info.max_rewind;
2152
2153 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2154
2155 if (s->thread_info.rewind_requested &&
2156 nbytes <= s->thread_info.rewind_nbytes)
2157 return;
2158
2159 s->thread_info.rewind_nbytes = nbytes;
2160 s->thread_info.rewind_requested = TRUE;
2161
2162 if (s->request_rewind)
2163 s->request_rewind(s);
2164 }
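
/* Usage sketch (illustrative, not part of this file): callers in the IO thread
 * either pass (size_t) -1 to rewind as much as possible, or convert a time span
 * to bytes first, as the message handlers above do. "usec" stands for a
 * hypothetical latency value.
 *
 *   pa_sink_request_rewind(s, pa_usec_to_bytes(usec, &s->sample_spec));
 */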
2165
2166 /* Called from IO thread */
2167 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2168 pa_usec_t result = (pa_usec_t) -1;
2169 pa_sink_input *i;
2170 void *state = NULL;
2171 pa_usec_t monitor_latency;
2172
2173 pa_sink_assert_ref(s);
2174 pa_sink_assert_io_context(s);
2175
2176 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2177 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2178
2179 if (s->thread_info.requested_latency_valid)
2180 return s->thread_info.requested_latency;
2181
2182 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2183 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2184 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2185 result = i->thread_info.requested_sink_latency;
2186
2187 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2188
2189 if (monitor_latency != (pa_usec_t) -1 &&
2190 (result == (pa_usec_t) -1 || result > monitor_latency))
2191 result = monitor_latency;
2192
2193 if (result != (pa_usec_t) -1)
2194 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2195
2196 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2197 /* Only cache if properly initialized */
2198 s->thread_info.requested_latency = result;
2199 s->thread_info.requested_latency_valid = TRUE;
2200 }
2201
2202 return result;
2203 }
2204
2205 /* Called from main thread */
2206 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2207 pa_usec_t usec = 0;
2208
2209 pa_sink_assert_ref(s);
2210 pa_assert_ctl_context();
2211 pa_assert(PA_SINK_IS_LINKED(s->state));
2212
2213 if (s->state == PA_SINK_SUSPENDED)
2214 return 0;
2215
2216 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2217 return usec;
2218 }
2219
2220 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2221 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2222 pa_sink_input *i;
2223 void *state = NULL;
2224
2225 pa_sink_assert_ref(s);
2226 pa_sink_assert_io_context(s);
2227
2228 if (max_rewind == s->thread_info.max_rewind)
2229 return;
2230
2231 s->thread_info.max_rewind = max_rewind;
2232
2233 if (PA_SINK_IS_LINKED(s->thread_info.state))
2234 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2235 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2236
2237 if (s->monitor_source)
2238 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2239 }
2240
2241 /* Called from main thread */
2242 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2243 pa_sink_assert_ref(s);
2244 pa_assert_ctl_context();
2245
2246 if (PA_SINK_IS_LINKED(s->state))
2247 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2248 else
2249 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2250 }
2251
2252 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2253 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2254 void *state = NULL;
2255
2256 pa_sink_assert_ref(s);
2257 pa_sink_assert_io_context(s);
2258
2259 if (max_request == s->thread_info.max_request)
2260 return;
2261
2262 s->thread_info.max_request = max_request;
2263
2264 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2265 pa_sink_input *i;
2266
2267 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2268 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2269 }
2270 }
2271
2272 /* Called from main thread */
2273 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2274 pa_sink_assert_ref(s);
2275 pa_assert_ctl_context();
2276
2277 if (PA_SINK_IS_LINKED(s->state))
2278 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2279 else
2280 pa_sink_set_max_request_within_thread(s, max_request);
2281 }
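
/* Usage sketch (illustrative, not part of this file): a driver will typically
 * derive both limits from its hardware buffer size during (re)configuration in
 * its IO thread. "hwbuf_size" is a hypothetical variable holding that size in
 * bytes.
 *
 *   pa_sink_set_max_rewind_within_thread(s, hwbuf_size);
 *   pa_sink_set_max_request_within_thread(s, hwbuf_size);
 */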
2282
2283 /* Called from IO thread */
2284 void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
2285 pa_sink_input *i;
2286 void *state = NULL;
2287
2288 pa_sink_assert_ref(s);
2289 pa_sink_assert_io_context(s);
2290
2291 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
2292 s->thread_info.requested_latency_valid = FALSE;
2293 else if (dynamic)
2294 return;
2295
2296 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2297
2298 if (s->update_requested_latency)
2299 s->update_requested_latency(s);
2300
2301 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2302 if (i->update_sink_requested_latency)
2303 i->update_sink_requested_latency(i);
2304 }
2305 }
2306
2307 /* Called from main thread */
2308 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2309 pa_sink_assert_ref(s);
2310 pa_assert_ctl_context();
2311
2312 /* min_latency == 0: no limit
2313 * min_latency anything else: specified limit
2314 *
2315 * Similar for max_latency */
2316
2317 if (min_latency < ABSOLUTE_MIN_LATENCY)
2318 min_latency = ABSOLUTE_MIN_LATENCY;
2319
2320 if (max_latency <= 0 ||
2321 max_latency > ABSOLUTE_MAX_LATENCY)
2322 max_latency = ABSOLUTE_MAX_LATENCY;
2323
2324 pa_assert(min_latency <= max_latency);
2325
2326     /* Sanity check: anything other than the default range requires PA_SINK_DYNAMIC_LATENCY */
2327 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2328 max_latency == ABSOLUTE_MAX_LATENCY) ||
2329 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2330
2331 if (PA_SINK_IS_LINKED(s->state)) {
2332 pa_usec_t r[2];
2333
2334 r[0] = min_latency;
2335 r[1] = max_latency;
2336
2337 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2338 } else
2339 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2340 }
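
/* Usage sketch (illustrative, not part of this file): a sink that sets
 * PA_SINK_DYNAMIC_LATENCY declares the range it can honour; passing 0 for a
 * bound means "no limit" and is clamped to the absolute limits defined above.
 *
 *   pa_sink_set_latency_range(s, 0, 2 * PA_USEC_PER_SEC);
 */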
2341
2342 /* Called from main thread */
2343 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2344 pa_sink_assert_ref(s);
2345 pa_assert_ctl_context();
2346 pa_assert(min_latency);
2347 pa_assert(max_latency);
2348
2349 if (PA_SINK_IS_LINKED(s->state)) {
2350 pa_usec_t r[2] = { 0, 0 };
2351
2352 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2353
2354 *min_latency = r[0];
2355 *max_latency = r[1];
2356 } else {
2357 *min_latency = s->thread_info.min_latency;
2358 *max_latency = s->thread_info.max_latency;
2359 }
2360 }
2361
2362 /* Called from IO thread */
2363 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2364 pa_sink_assert_ref(s);
2365 pa_sink_assert_io_context(s);
2366
2367 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2368 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2369 pa_assert(min_latency <= max_latency);
2370
2371     /* Sanity check: anything other than the default range requires PA_SINK_DYNAMIC_LATENCY */
2372 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2373 max_latency == ABSOLUTE_MAX_LATENCY) ||
2374 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2375
2376 if (s->thread_info.min_latency == min_latency &&
2377 s->thread_info.max_latency == max_latency)
2378 return;
2379
2380 s->thread_info.min_latency = min_latency;
2381 s->thread_info.max_latency = max_latency;
2382
2383 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2384 pa_sink_input *i;
2385 void *state = NULL;
2386
2387 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2388 if (i->update_sink_latency_range)
2389 i->update_sink_latency_range(i);
2390 }
2391
2392 pa_sink_invalidate_requested_latency(s, FALSE);
2393
2394 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2395 }
2396
2397 /* Called from main thread */
2398 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2399 pa_sink_assert_ref(s);
2400 pa_assert_ctl_context();
2401
2402 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2403 pa_assert(latency == 0);
2404 return;
2405 }
2406
2407 if (latency < ABSOLUTE_MIN_LATENCY)
2408 latency = ABSOLUTE_MIN_LATENCY;
2409
2410 if (latency > ABSOLUTE_MAX_LATENCY)
2411 latency = ABSOLUTE_MAX_LATENCY;
2412
2413 if (PA_SINK_IS_LINKED(s->state))
2414 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2415 else
2416 s->thread_info.fixed_latency = latency;
2417
2418 pa_source_set_fixed_latency(s->monitor_source, latency);
2419 }
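
/* Usage sketch (illustrative, not part of this file): a sink without
 * PA_SINK_DYNAMIC_LATENCY reports a single fixed latency instead, typically
 * derived from its buffer size; "buffer_size" is a hypothetical value in bytes.
 *
 *   pa_sink_set_fixed_latency(s, pa_bytes_to_usec(buffer_size, &s->sample_spec));
 */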
2420
2421 /* Called from main thread */
2422 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2423 pa_usec_t latency;
2424
2425 pa_sink_assert_ref(s);
2426 pa_assert_ctl_context();
2427
2428 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2429 return 0;
2430
2431 if (PA_SINK_IS_LINKED(s->state))
2432 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2433 else
2434 latency = s->thread_info.fixed_latency;
2435
2436 return latency;
2437 }
2438
2439 /* Called from IO thread */
2440 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
2441 pa_sink_assert_ref(s);
2442 pa_sink_assert_io_context(s);
2443
2444 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2445 pa_assert(latency == 0);
2446 return;
2447 }
2448
2449 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2450 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2451
2452 if (s->thread_info.fixed_latency == latency)
2453 return;
2454
2455 s->thread_info.fixed_latency = latency;
2456
2457 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2458 pa_sink_input *i;
2459 void *state = NULL;
2460
2461 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2462 if (i->update_sink_fixed_latency)
2463 i->update_sink_fixed_latency(i);
2464 }
2465
2466 pa_sink_invalidate_requested_latency(s, FALSE);
2467
2468 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
2469 }
2470
2471 /* Called from main context */
2472 size_t pa_sink_get_max_rewind(pa_sink *s) {
2473 size_t r;
2474 pa_sink_assert_ref(s);
2475 pa_assert_ctl_context();
2476
2477 if (!PA_SINK_IS_LINKED(s->state))
2478 return s->thread_info.max_rewind;
2479
2480 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2481
2482 return r;
2483 }
2484
2485 /* Called from main context */
2486 size_t pa_sink_get_max_request(pa_sink *s) {
2487 size_t r;
2488 pa_sink_assert_ref(s);
2489 pa_assert_ctl_context();
2490
2491 if (!PA_SINK_IS_LINKED(s->state))
2492 return s->thread_info.max_request;
2493
2494 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2495
2496 return r;
2497 }
2498
2499 /* Called from main context */
2500 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2501 pa_device_port *port;
2502
2503 pa_sink_assert_ref(s);
2504 pa_assert_ctl_context();
2505
2506 if (!s->set_port) {
2507 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2508 return -PA_ERR_NOTIMPLEMENTED;
2509 }
2510
2511 if (!s->ports)
2512 return -PA_ERR_NOENTITY;
2513
2514 if (!(port = pa_hashmap_get(s->ports, name)))
2515 return -PA_ERR_NOENTITY;
2516
2517 if (s->active_port == port) {
2518 s->save_port = s->save_port || save;
2519 return 0;
2520 }
2521
2522 if ((s->set_port(s, port)) < 0)
2523 return -PA_ERR_NOENTITY;
2524
2525 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2526
2527 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2528
2529 s->active_port = port;
2530 s->save_port = save;
2531
2532 return 0;
2533 }
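
/* Usage sketch (illustrative, not part of this file): switching the active port
 * by name and remembering the choice; the port name shown is hypothetical and
 * must match one of the ports the driver registered in s->ports.
 *
 *   if (pa_sink_set_port(s, "analog-output-headphones", TRUE) < 0)
 *       pa_log_warn("Failed to switch port.");
 */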
2534
2535 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2536 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2537
2538 pa_assert(p);
2539
2540 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2541 return TRUE;
2542
2543 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2544
2545 if (pa_streq(ff, "microphone"))
2546 t = "audio-input-microphone";
2547 else if (pa_streq(ff, "webcam"))
2548 t = "camera-web";
2549 else if (pa_streq(ff, "computer"))
2550 t = "computer";
2551 else if (pa_streq(ff, "handset"))
2552 t = "phone";
2553 else if (pa_streq(ff, "portable"))
2554 t = "multimedia-player";
2555 else if (pa_streq(ff, "tv"))
2556 t = "video-display";
2557
2558 /*
2559          * The following icons are not part of the icon naming spec;
2560          * see the discussion referenced below for the background.
2561 *
2562 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2563 */
2564 else if (pa_streq(ff, "headset"))
2565 t = "audio-headset";
2566 else if (pa_streq(ff, "headphone"))
2567 t = "audio-headphones";
2568 else if (pa_streq(ff, "speaker"))
2569 t = "audio-speakers";
2570 else if (pa_streq(ff, "hands-free"))
2571 t = "audio-handsfree";
2572 }
2573
2574 if (!t)
2575 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2576 if (pa_streq(c, "modem"))
2577 t = "modem";
2578
2579 if (!t) {
2580 if (is_sink)
2581 t = "audio-card";
2582 else
2583 t = "audio-input-microphone";
2584 }
2585
2586 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2587 if (strstr(profile, "analog"))
2588 s = "-analog";
2589 else if (strstr(profile, "iec958"))
2590 s = "-iec958";
2591 else if (strstr(profile, "hdmi"))
2592 s = "-hdmi";
2593 }
2594
2595 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2596
2597 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2598
2599 return TRUE;
2600 }
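
/* Worked example (illustrative, not part of this file): with
 * PA_PROP_DEVICE_FORM_FACTOR = "headphone", a profile name containing "analog"
 * and PA_PROP_DEVICE_BUS = "usb", the code above composes the icon name
 * "audio-headphones-analog-usb" and stores it in PA_PROP_DEVICE_ICON_NAME.
 *
 *   pa_proplist *p = pa_proplist_new();
 *   pa_proplist_sets(p, PA_PROP_DEVICE_FORM_FACTOR, "headphone");
 *   pa_proplist_sets(p, PA_PROP_DEVICE_PROFILE_NAME, "analog-stereo");
 *   pa_proplist_sets(p, PA_PROP_DEVICE_BUS, "usb");
 *   pa_device_init_icon(p, TRUE);
 *   pa_proplist_free(p);
 */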
2601
2602 pa_bool_t pa_device_init_description(pa_proplist *p) {
2603 const char *s, *d = NULL, *k;
2604 pa_assert(p);
2605
2606 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2607 return TRUE;
2608
2609 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2610 if (pa_streq(s, "internal"))
2611 d = _("Internal Audio");
2612
2613 if (!d)
2614 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2615 if (pa_streq(s, "modem"))
2616 d = _("Modem");
2617
2618 if (!d)
2619 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2620
2621 if (!d)
2622 return FALSE;
2623
2624 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2625
2626     if (k)
2627         pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2628     else
2629         pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2630
2631 return TRUE;
2632 }
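
/* Worked example (illustrative, not part of this file): with
 * PA_PROP_DEVICE_FORM_FACTOR = "internal" and PA_PROP_DEVICE_PROFILE_DESCRIPTION
 * = "Analog Stereo", the code above sets PA_PROP_DEVICE_DESCRIPTION to
 * "Internal Audio Analog Stereo" (subject to translation of "Internal Audio").
 *
 *   pa_proplist *p = pa_proplist_new();
 *   pa_proplist_sets(p, PA_PROP_DEVICE_FORM_FACTOR, "internal");
 *   pa_proplist_sets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION, "Analog Stereo");
 *   pa_device_init_description(p);
 *   pa_proplist_free(p);
 */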
2633
2634 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2635 const char *s;
2636 pa_assert(p);
2637
2638 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2639 return TRUE;
2640
2641 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2642 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2643 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2644 return TRUE;
2645 }
2646
2647 return FALSE;
2648 }
2649
2650 unsigned pa_device_init_priority(pa_proplist *p) {
2651 const char *s;
2652 unsigned priority = 0;
2653
2654 pa_assert(p);
2655
2656 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
2657
2658 if (pa_streq(s, "sound"))
2659 priority += 9000;
2660 else if (!pa_streq(s, "modem"))
2661 priority += 1000;
2662 }
2663
2664 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2665
2666 if (pa_streq(s, "internal"))
2667 priority += 900;
2668 else if (pa_streq(s, "speaker"))
2669 priority += 500;
2670 else if (pa_streq(s, "headphone"))
2671 priority += 400;
2672 }
2673
2674 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
2675
2676 if (pa_streq(s, "pci"))
2677 priority += 50;
2678 else if (pa_streq(s, "usb"))
2679 priority += 40;
2680 else if (pa_streq(s, "bluetooth"))
2681 priority += 30;
2682 }
2683
2684 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2685
2686 if (pa_startswith(s, "analog-"))
2687 priority += 9;
2688 else if (pa_startswith(s, "iec958-"))
2689 priority += 8;
2690 }
2691
2692 return priority;
2693 }
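
/* Worked example (illustrative, not part of this file): a proplist with
 * PA_PROP_DEVICE_CLASS = "sound" (+9000), PA_PROP_DEVICE_FORM_FACTOR =
 * "headphone" (+400), PA_PROP_DEVICE_BUS = "usb" (+40) and
 * PA_PROP_DEVICE_PROFILE_NAME = "analog-stereo" (+9) yields a priority of 9449
 * from pa_device_init_priority(). */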