]> code.delx.au - pulseaudio/blob - src/pulsecore/sink.c
Move i18n.[ch] to src/pulsecore
[pulseaudio] / src / pulsecore / sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/format.h>
33 #include <pulse/utf8.h>
34 #include <pulse/xmalloc.h>
35 #include <pulse/timeval.h>
36 #include <pulse/util.h>
37 #include <pulse/rtclock.h>
38 #include <pulse/internal.h>
39
40 #include <pulsecore/i18n.h>
41 #include <pulsecore/sink-input.h>
42 #include <pulsecore/namereg.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/core-subscribe.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/play-memblockq.h>
49 #include <pulsecore/flist.h>
50
51 #include "sink.h"
52
53 #define MAX_MIX_CHANNELS 32
54 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
55 #define ABSOLUTE_MIN_LATENCY (500)
56 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
57 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
58
59 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
60
/* One queued deferred hardware volume change: apply 'hw_volume' at time
 * 'at'. Entries are chained on s->thread_info.volume_changes (see
 * pa_sink_volume_change_push/flush/rewind below). */
struct pa_sink_volume_change {
    pa_usec_t at;           /* when this change should take effect */
    pa_cvolume hw_volume;   /* hardware volume to apply at that time */

    PA_LLIST_FIELDS(pa_sink_volume_change);
};
67
/* Payload for the set-port message exchanged with the IO thread.
 * NOTE(review): 'ret' is presumably filled in by the message handler
 * with the result code — handler not visible in this chunk, confirm. */
struct sink_message_set_port {
    pa_device_port *port;   /* port to activate */
    int ret;                /* result reported back to the sender */
};
72
73 static void sink_free(pa_object *s);
74
75 static void pa_sink_volume_change_push(pa_sink *s);
76 static void pa_sink_volume_change_flush(pa_sink *s);
77 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
78
/* Initialize a caller-provided pa_sink_new_data structure: zero all
 * fields and attach a fresh, empty proplist. Returns 'data' so the call
 * can be chained. Pair with pa_sink_new_data_done(). */
pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
    pa_assert(data);

    pa_zero(*data);
    data->proplist = pa_proplist_new();

    return data;
}
87
88 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
89 pa_assert(data);
90
91 pa_xfree(data->name);
92 data->name = pa_xstrdup(name);
93 }
94
95 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
96 pa_assert(data);
97
98 if ((data->sample_spec_is_set = !!spec))
99 data->sample_spec = *spec;
100 }
101
102 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
103 pa_assert(data);
104
105 if ((data->channel_map_is_set = !!map))
106 data->channel_map = *map;
107 }
108
109 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
110 pa_assert(data);
111
112 if ((data->volume_is_set = !!volume))
113 data->volume = *volume;
114 }
115
116 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
117 pa_assert(data);
118
119 data->muted_is_set = TRUE;
120 data->muted = !!mute;
121 }
122
123 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
124 pa_assert(data);
125
126 pa_xfree(data->active_port);
127 data->active_port = pa_xstrdup(port);
128 }
129
130 void pa_sink_new_data_done(pa_sink_new_data *data) {
131 pa_assert(data);
132
133 pa_proplist_free(data->proplist);
134
135 if (data->ports) {
136 pa_device_port *p;
137
138 while ((p = pa_hashmap_steal_first(data->ports)))
139 pa_device_port_free(p);
140
141 pa_hashmap_free(data->ports, NULL, NULL);
142 }
143
144 pa_xfree(data->name);
145 pa_xfree(data->active_port);
146 }
147
148 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
149 pa_device_port *p;
150
151 pa_assert(name);
152
153 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
154 p->name = pa_xstrdup(name);
155 p->description = pa_xstrdup(description);
156
157 p->priority = 0;
158
159 return p;
160 }
161
162 void pa_device_port_free(pa_device_port *p) {
163 pa_assert(p);
164
165 pa_xfree(p->name);
166 pa_xfree(p->description);
167 pa_xfree(p);
168 }
169
170 /* Called from main context */
171 static void reset_callbacks(pa_sink *s) {
172 pa_assert(s);
173
174 s->set_state = NULL;
175 s->get_volume = NULL;
176 s->set_volume = NULL;
177 s->get_mute = NULL;
178 s->set_mute = NULL;
179 s->request_rewind = NULL;
180 s->update_requested_latency = NULL;
181 s->set_port = NULL;
182 s->get_formats = NULL;
183 }
184
/* Called from main context.
 *
 * Create a new sink from the fields collected in 'data'. Registers the
 * name, fires the NEW and FIXATE hooks, validates/fixates sample spec,
 * channel map, volume and mute, initializes both main-thread and
 * thread_info state, and creates the companion monitor source
 * ("<name>.monitor"). Returns NULL on any failure. On success the sink
 * is in PA_SINK_INIT state; the caller must still set the asyncmsgq and
 * call pa_sink_put(). */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* The name registry may rewrite the requested name (e.g. to make it
     * unique), so register before anything else uses it. */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Derive a default channel map from the channel count if the caller
     * didn't supply one. */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    /* FIXME: There should probably be a general function for checking whether
     * the sink volume is allowed to be set, like there is for sink inputs. */
    pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    if (!data->volume_is_set) {
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);
        data->save_volume = FALSE;
    }

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit properties from the owning card, if any. */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to adjust 'data' before it's fixed. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;
    s->input_to_master = NULL;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    /* Honor an explicitly requested port... */
    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* ...otherwise pick the highest-priority port as default. */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* Initialize the IO-thread-side mirror of the state. */
    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
    s->thread_info.volume_changes_tail = NULL;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
    s->thread_info.volume_change_safety_margin = core->sync_volume_safety_margin_usec;
    s->thread_info.volume_change_extra_delay = core->sync_volume_extra_delay_usec;

    /* FIXME: This should probably be moved to pa_sink_put() */
    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Every sink gets a monitor source mirroring its output. It shares
     * the sink's spec/map and inherits the latency-related flags. */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
394
/* Called from main context.
 *
 * Transition the sink to 'state': first asks the driver (set_state),
 * then the IO thread (SET_STATE message), rolling the driver back if
 * the IO thread refuses. On success fires hooks/subscription events and
 * propagates suspend/resume to all sink inputs and the monitor source.
 * Returns 0 on success or a negative error from the driver/IO thread. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* TRUE iff we cross the SUSPENDED boundary in either direction. */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* The IO thread rejected the change: undo the driver-side
             * transition we already made above. */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the apropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
452
/* Install the driver callback used to read the hardware volume.
 * Unlike the set-volume setter below, this does not affect any flags. */
void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
    pa_assert(s);

    s->get_volume = cb;
}
458
459 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
460 pa_sink_flags_t flags;
461
462 pa_assert(s);
463 pa_assert(!s->write_volume || cb);
464
465 s->set_volume = cb;
466
467 /* Save the current flags so we can tell if they've changed */
468 flags = s->flags;
469
470 if (cb) {
471 /* The sink implementor is responsible for setting decibel volume support */
472 s->flags |= PA_SINK_HW_VOLUME_CTRL;
473 } else {
474 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
475 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
476 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
477 }
478
479 /* If the flags have changed after init, let any clients know via a change event */
480 if (s->state != PA_SINK_INIT && flags != s->flags)
481 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
482 }
483
484 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
485 pa_sink_flags_t flags;
486
487 pa_assert(s);
488 pa_assert(!cb || s->set_volume);
489
490 s->write_volume = cb;
491
492 /* Save the current flags so we can tell if they've changed */
493 flags = s->flags;
494
495 if (cb)
496 s->flags |= PA_SINK_SYNC_VOLUME;
497 else
498 s->flags &= ~PA_SINK_SYNC_VOLUME;
499
500 /* If the flags have changed after init, let any clients know via a change event */
501 if (s->state != PA_SINK_INIT && flags != s->flags)
502 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
503 }
504
/* Install the driver callback used to read the hardware mute state.
 * Does not affect any flags (cf. the set-mute setter below). */
void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
    pa_assert(s);

    s->get_mute = cb;
}
510
511 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
512 pa_sink_flags_t flags;
513
514 pa_assert(s);
515
516 s->set_mute = cb;
517
518 /* Save the current flags so we can tell if they've changed */
519 flags = s->flags;
520
521 if (cb)
522 s->flags |= PA_SINK_HW_MUTE_CTRL;
523 else
524 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
525
526 /* If the flags have changed after init, let any clients know via a change event */
527 if (s->state != PA_SINK_INIT && flags != s->flags)
528 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
529 }
530
531 static void enable_flat_volume(pa_sink *s, pa_bool_t enable) {
532 pa_sink_flags_t flags;
533
534 pa_assert(s);
535
536 /* Always follow the overall user preference here */
537 enable = enable && s->core->flat_volumes;
538
539 /* Save the current flags so we can tell if they've changed */
540 flags = s->flags;
541
542 if (enable)
543 s->flags |= PA_SINK_FLAT_VOLUME;
544 else
545 s->flags &= ~PA_SINK_FLAT_VOLUME;
546
547 /* If the flags have changed after init, let any clients know via a change event */
548 if (s->state != PA_SINK_INIT && flags != s->flags)
549 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
550 }
551
552 void pa_sink_enable_decibel_volume(pa_sink *s, pa_bool_t enable) {
553 pa_sink_flags_t flags;
554
555 pa_assert(s);
556
557 /* Save the current flags so we can tell if they've changed */
558 flags = s->flags;
559
560 if (enable) {
561 s->flags |= PA_SINK_DECIBEL_VOLUME;
562 enable_flat_volume(s, TRUE);
563 } else {
564 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
565 enable_flat_volume(s, FALSE);
566 }
567
568 /* If the flags have changed after init, let any clients know via a change event */
569 if (s->state != PA_SINK_INIT && flags != s->flags)
570 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
571 }
572
/* Called from main context.
 *
 * Finish construction of a sink created with pa_sink_new(): verify the
 * flag/callback invariants, fix up volumes (including inheriting them
 * from the root master when volume sharing is used), move the sink to
 * IDLE, put the monitor source and announce the new sink via a
 * subscription event and the SINK_PUT hook. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || s->input_to_master);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow some volume related flags to be set
     * between _new() and _put() by the callback setter functions above.
     *
     * Thus we implement a couple safeguards here which ensure the above
     * setters were used (or at least the implementor made manual changes
     * in a compatible way).
     *
     * Note: All of these flags set here can change over the life time
     * of the sink. */
    pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
    pa_assert(!(s->flags & PA_SINK_SYNC_VOLUME) || s->write_volume);
    pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);

    /* XXX: Currently decibel volume is disabled for all sinks that use volume
     * sharing. When the master sink supports decibel volume, it would be good
     * to have the flag also in the filter sink, but currently we don't do that
     * so that the flags of the filter sink never change when it's moved from
     * a master sink to another. One solution for this problem would be to
     * remove user-visible volume altogether from filter sinks when volume
     * sharing is used, but the current approach was easier to implement... */
    /* We always support decibel volumes in software, otherwise we leave it to
     * the sink implementor to set this flag as needed.
     *
     * Note: This flag can also change over the life time of the sink. */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        pa_sink_enable_decibel_volume(s, TRUE);

    /* If the sink implementor support DB volumes by itself, we should always
     * try and enable flat volumes too */
    if ((s->flags & PA_SINK_DECIBEL_VOLUME))
        enable_flat_volume(s, TRUE);

    if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
        /* Walk up the chain of filter sinks to the real hardware sink and
         * inherit its volumes, remapped to our channel map. */
        pa_sink *root_sink = s->input_to_master->sink;

        while (root_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
            root_sink = root_sink->input_to_master->sink;

        s->reference_volume = root_sink->reference_volume;
        pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);

        s->real_volume = root_sink->real_volume;
        pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
    } else
        /* We assume that if the sink implementor changed the default
         * volume he did so in real_volume, because that is the usual
         * place where he is supposed to place his changes. */
        s->reference_volume = s->real_volume;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);

    /* Consistency checks: base volume / volume steps / latency flags must
     * agree between the sink, its flags and its monitor source. */
    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
              || (s->base_volume == PA_VOLUME_NORM
                  && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
658
/* Called from main context.
 *
 * Disconnect the sink from the core: unregister its name, remove it
 * from the core/card sink sets, kill all remaining inputs, move it to
 * UNLINKED state, clear the driver callbacks and unlink the monitor
 * source. Fires UNLINK before and UNLINK_POST after (plus a REMOVE
 * subscription event) when the sink was actually linked. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Kill all inputs; the j/i comparison asserts that each kill really
     * removed the input, so this loop cannot spin forever. */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
708
/* Called from main context.
 *
 * Final destructor, invoked when the last reference is dropped (wired
 * up as s->parent.parent.free in pa_sink_new()). Unlinks the sink if
 * still linked, then releases everything it owns: the monitor source,
 * input sets, silence memblock, strings, proplist and ports. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);
    pa_sink_input *i;

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    pa_idxset_free(s->inputs, NULL, NULL);

    /* Drop the refs the IO-thread hashmap held on its inputs. */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
755
756 /* Called from main context, and not while the IO thread is active, please */
757 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
758 pa_sink_assert_ref(s);
759 pa_assert_ctl_context();
760
761 s->asyncmsgq = q;
762
763 if (s->monitor_source)
764 pa_source_set_asyncmsgq(s->monitor_source, q);
765 }
766
767 /* Called from main context, and not while the IO thread is active, please */
768 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
769 pa_sink_assert_ref(s);
770 pa_assert_ctl_context();
771
772 if (mask == 0)
773 return;
774
775 /* For now, allow only a minimal set of flags to be changed. */
776 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
777
778 s->flags = (s->flags & ~mask) | (value & mask);
779
780 pa_source_update_flags(s->monitor_source,
781 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
782 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
783 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
784 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
785 }
786
787 /* Called from IO context, or before _put() from main context */
788 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
789 pa_sink_assert_ref(s);
790 pa_sink_assert_io_context(s);
791
792 s->thread_info.rtpoll = p;
793
794 if (s->monitor_source)
795 pa_source_set_rtpoll(s->monitor_source, p);
796 }
797
798 /* Called from main context */
799 int pa_sink_update_status(pa_sink*s) {
800 pa_sink_assert_ref(s);
801 pa_assert_ctl_context();
802 pa_assert(PA_SINK_IS_LINKED(s->state));
803
804 if (s->state == PA_SINK_SUSPENDED)
805 return 0;
806
807 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
808 }
809
/* Called from main context.
 *
 * Add or remove 'cause' from the sink's (and its monitor source's)
 * suspend-cause bitmask and suspend/resume accordingly: the sink is
 * suspended while any cause remains, and resumed to RUNNING/IDLE only
 * when the mask becomes empty. Returns the state-change result, or 0
 * if the effective state already matches the mask. */
int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (suspend) {
        s->suspend_cause |= cause;
        s->monitor_source->suspend_cause |= cause;
    } else {
        s->suspend_cause &= ~cause;
        s->monitor_source->suspend_cause &= ~cause;
    }

    /* Nothing to do if the current state already reflects the mask. */
    if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return sink_set_state(s, PA_SINK_SUSPENDED);
    else
        return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
835
/* Called from main context.
 *
 * Begin moving all inputs away from this sink: each input that accepts
 * the move is detached (pa_sink_input_start_move) and queued, holding a
 * reference, for a later pa_sink_move_all_finish()/_fail(). Inputs that
 * refuse the move stay put. 'q' may be NULL, in which case a new queue
 * is created; the (possibly new) queue is returned. */
pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
    pa_sink_input *i, *n;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (!q)
        q = pa_queue_new();

    /* Fetch the next input before detaching the current one, since
     * start_move removes it from s->inputs under us. */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
        n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));

        pa_sink_input_ref(i);

        if (pa_sink_input_start_move(i) >= 0)
            pa_queue_push(q, i);
        else
            pa_sink_input_unref(i);
    }

    return q;
}
861
862 /* Called from main context */
863 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
864 pa_sink_input *i;
865
866 pa_sink_assert_ref(s);
867 pa_assert_ctl_context();
868 pa_assert(PA_SINK_IS_LINKED(s->state));
869 pa_assert(q);
870
871 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
872 if (pa_sink_input_finish_move(i, s, save) < 0)
873 pa_sink_input_fail_move(i);
874
875 pa_sink_input_unref(i);
876 }
877
878 pa_queue_free(q, NULL, NULL);
879 }
880
881 /* Called from main context */
882 void pa_sink_move_all_fail(pa_queue *q) {
883 pa_sink_input *i;
884
885 pa_assert_ctl_context();
886 pa_assert(q);
887
888 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
889 pa_sink_input_fail_move(i);
890 pa_sink_input_unref(i);
891 }
892
893 pa_queue_free(q, NULL, NULL);
894 }
895
/* Called from IO thread context.
 *
 * Execute a rewind of 'nbytes' bytes: clears the pending rewind
 * request, rewinds any queued deferred volume changes, forwards the
 * rewind to every input (even with nbytes == 0, so inputs can reset
 * their rewind bookkeeping) and finally to the monitor source. */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* If nobody requested this and this is actually no real rewind
     * then we can short cut this. Please note that this means that
     * not all rewind requests triggered upstream will always be
     * translated in actual requests! */
    if (!s->thread_info.rewind_requested && nbytes <= 0)
        return;

    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return;

    if (nbytes > 0) {
        pa_log_debug("Processing rewind...");
        /* Deferred hardware volume changes must be wound back too. */
        if (s->flags & PA_SINK_SYNC_VOLUME)
            pa_sink_volume_change_rewind(s, nbytes);
    }

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_process_rewind(i, nbytes);
    }

    if (nbytes > 0) {
        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
            pa_source_process_rewind(s->monitor_source, nbytes);
    }
}
934
/* Called from IO thread context.
 *
 * Peek up to 'maxinfo' chunks of audio from the connected inputs into
 * 'info', skipping inputs whose current block is pure silence. On
 * return *length is clamped to the shortest chunk seen and the number
 * of filled entries is returned. Each filled entry holds a reference to
 * its sink input (in ->userdata) and to its memblock; inputs_drop()
 * releases both. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Silent blocks need no mixing; drop the block right away. Note
         * that the input hasn't been ref'ed yet at this point. */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
974
/* Called from IO thread context.
 *
 * After a render pass, consume 'result->length' bytes from every input,
 * feed per-input data (or silence) to any direct outputs on the monitor
 * source, and release the references taken by fill_mix_info() — both
 * for entries whose input still exists and for leftover entries whose
 * input disappeared in the meantime. Finally posts 'result' to the
 * monitor source. 'info'/'n' are the array filled by fill_mix_info(). */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array */
        /* 'p' persists across iterations so the scan usually hits on the
         * first probe when input order is unchanged; it wraps around to
         * cover the whole array otherwise. */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                if (m && m->chunk.memblock) {
                    /* This input contributed audio: hand its (volume
                     * adjusted) data to the direct outputs. */
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    /* No mix entry (silent or dropped input): send the
                     * cached silence block instead. */
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release the references fill_mix_info() took for this entry. */
        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
1071
/* Called from IO thread context.
 *
 * Renders up to 'length' bytes of mixed audio into a freshly referenced
 * memchunk stored in *result. 'length' may be 0, in which case a default
 * block size is used. The returned chunk may be shorter than requested.
 * Must not be called while a rewind is pending. */
void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    /* A suspended sink produces plain silence without touching the inputs. */
    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        result->memblock = pa_memblock_ref(s->silence.memblock);
        result->index = s->silence.index;
        result->length = PA_MIN(s->silence.length, length);
        return;
    }

    pa_sink_ref(s);

    if (length <= 0)
        length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);

    /* Never ask for more than one memblock can hold. */
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    /* Collect the data of all inputs; fill_mix_info() may shrink 'length'
     * to the shortest chunk available. */
    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {

        /* No inputs: hand out a reference to the cached silence block. */
        *result = s->silence;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

    } else if (n == 1) {
        pa_cvolume volume;

        /* Exactly one input: pass its chunk through, applying volume
         * in place only when actually needed. */
        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
            /* Effective volume is zero: replace the data with silence. */
            pa_memblock_unref(result->memblock);
            pa_silence_memchunk_get(&s->core->silence_cache,
                                    s->core->mempool,
                                    result,
                                    &s->sample_spec,
                                    result->length);
        } else if (!pa_cvolume_is_norm(&volume)) {
            pa_memchunk_make_writable(result, 0);
            pa_volume_memchunk(result, &s->sample_spec, &volume);
        }
    } else {
        void *ptr;

        /* Multiple inputs: do a real mix into a new memblock. */
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);
        result->length = pa_mix(info, n,
                                ptr, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);
        pa_memblock_release(result->memblock);

        result->index = 0;
    }

    /* Consume the rendered data from the inputs and release mix state. */
    inputs_drop(s, info, n, result);

    pa_sink_unref(s);
}
1156
/* Called from IO thread context.
 *
 * Like pa_sink_render(), but mixes directly into the caller-supplied
 * memchunk 'target'. target->length may be shortened if less data was
 * available. Must not be called while a rewind is pending. */
void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t length, block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    /* A suspended sink just fills the target with silence. */
    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
        return;
    }

    pa_sink_ref(s);

    length = target->length;
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    /* Collect the inputs' data; 'length' may shrink to the shortest chunk. */
    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* No inputs: silence the (possibly shortened) target. */
        if (target->length > length)
            target->length = length;

        pa_silence_memchunk(target, &s->sample_spec);
    } else if (n == 1) {
        pa_cvolume volume;

        if (target->length > length)
            target->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
            pa_silence_memchunk(target, &s->sample_spec);
        else {
            pa_memchunk vchunk;

            /* Single input: copy its chunk into the target, applying the
             * volume on a writable copy only if it is not unity. */
            vchunk = info[0].chunk;
            pa_memblock_ref(vchunk.memblock);

            if (vchunk.length > length)
                vchunk.length = length;

            if (!pa_cvolume_is_norm(&volume)) {
                pa_memchunk_make_writable(&vchunk, 0);
                pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
            }

            pa_memchunk_memcpy(target, &vchunk);
            pa_memblock_unref(vchunk.memblock);
        }

    } else {
        void *ptr;

        /* Multiple inputs: mix straight into the target memblock. */
        ptr = pa_memblock_acquire(target->memblock);

        target->length = pa_mix(info, n,
                                (uint8_t*) ptr + target->index, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(target->memblock);
    }

    /* Consume the rendered data from the inputs and release mix state. */
    inputs_drop(s, info, n, target);

    pa_sink_unref(s);
}
1241
1242 /* Called from IO thread context */
1243 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1244 pa_memchunk chunk;
1245 size_t l, d;
1246
1247 pa_sink_assert_ref(s);
1248 pa_sink_assert_io_context(s);
1249 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1250 pa_assert(target);
1251 pa_assert(target->memblock);
1252 pa_assert(target->length > 0);
1253 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1254
1255 pa_assert(!s->thread_info.rewind_requested);
1256 pa_assert(s->thread_info.rewind_nbytes == 0);
1257
1258 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1259 pa_silence_memchunk(target, &s->sample_spec);
1260 return;
1261 }
1262
1263 pa_sink_ref(s);
1264
1265 l = target->length;
1266 d = 0;
1267 while (l > 0) {
1268 chunk = *target;
1269 chunk.index += d;
1270 chunk.length -= d;
1271
1272 pa_sink_render_into(s, &chunk);
1273
1274 d += chunk.length;
1275 l -= chunk.length;
1276 }
1277
1278 pa_sink_unref(s);
1279 }
1280
1281 /* Called from IO thread context */
1282 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1283 pa_sink_assert_ref(s);
1284 pa_sink_assert_io_context(s);
1285 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1286 pa_assert(length > 0);
1287 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1288 pa_assert(result);
1289
1290 pa_assert(!s->thread_info.rewind_requested);
1291 pa_assert(s->thread_info.rewind_nbytes == 0);
1292
1293 pa_sink_ref(s);
1294
1295 pa_sink_render(s, length, result);
1296
1297 if (result->length < length) {
1298 pa_memchunk chunk;
1299
1300 pa_memchunk_make_writable(result, length);
1301
1302 chunk.memblock = result->memblock;
1303 chunk.index = result->index + result->length;
1304 chunk.length = length - result->length;
1305
1306 pa_sink_render_into_full(s, &chunk);
1307
1308 result->length = length;
1309 }
1310
1311 pa_sink_unref(s);
1312 }
1313
1314 /* Called from main thread */
1315 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1316 pa_usec_t usec = 0;
1317
1318 pa_sink_assert_ref(s);
1319 pa_assert_ctl_context();
1320 pa_assert(PA_SINK_IS_LINKED(s->state));
1321
1322 /* The returned value is supposed to be in the time domain of the sound card! */
1323
1324 if (s->state == PA_SINK_SUSPENDED)
1325 return 0;
1326
1327 if (!(s->flags & PA_SINK_LATENCY))
1328 return 0;
1329
1330 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1331
1332 return usec;
1333 }
1334
/* Called from IO thread.
 *
 * IO-thread-side variant of pa_sink_get_latency(): queries the latency by
 * calling the sink's process_msg() directly instead of sending a message. */
pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
    pa_usec_t usec = 0;
    pa_msgobject *o;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SINK_LATENCY))
        return 0;

    o = PA_MSGOBJECT(s);

    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */

    /* NOTE(review): pa_usec_t is unsigned, so this -1 wraps to
     * (pa_usec_t) -1; presumably callers use that as an error marker —
     * confirm before changing. */
    if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
        return -1;

    return usec;
}
1361
1362 /* Called from the main thread (and also from the IO thread while the main
1363 * thread is waiting).
1364 *
1365 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1366 * set. Instead, flat volume mode is detected by checking whether the root sink
1367 * has the flag set. */
1368 pa_bool_t pa_sink_flat_volume_enabled(pa_sink *s) {
1369 pa_sink_assert_ref(s);
1370
1371 while (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
1372 s = s->input_to_master->sink;
1373
1374 return (s->flags & PA_SINK_FLAT_VOLUME);
1375 }
1376
1377 /* Called from main context */
1378 pa_bool_t pa_sink_is_passthrough(pa_sink *s) {
1379 pa_sink_input *alt_i;
1380 uint32_t idx;
1381
1382 pa_sink_assert_ref(s);
1383
1384 /* one and only one PASSTHROUGH input can possibly be connected */
1385 if (pa_idxset_size(s->inputs) == 1) {
1386 alt_i = pa_idxset_first(s->inputs, &idx);
1387
1388 if (pa_sink_input_is_passthrough(alt_i))
1389 return TRUE;
1390 }
1391
1392 return FALSE;
1393 }
1394
/* Called from main context.
 *
 * Recomputes one stream's reference ratio (its volume relative to the
 * sink's reference volume). Only meaningful in flat volume mode. */
static void compute_reference_ratio(pa_sink_input *i) {
    unsigned c = 0;
    pa_cvolume remapped;

    pa_assert(i);
    pa_assert(pa_sink_flat_volume_enabled(i->sink));

    /*
     * Calculates the reference ratio from the sink's reference
     * volume. This basically calculates:
     *
     * i->reference_ratio = i->volume / i->sink->reference_volume
     */

    /* Bring the sink volume into the stream's channel map first. */
    remapped = i->sink->reference_volume;
    pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);

    i->reference_ratio.channels = i->sample_spec.channels;

    for (c = 0; c < i->sample_spec.channels; c++) {

        /* We don't update when the sink volume is 0 anyway */
        if (remapped.values[c] <= PA_VOLUME_MUTED)
            continue;

        /* Don't update the reference ratio unless necessary: if the old
         * ratio still multiplies back to the stream volume exactly, keep
         * it to avoid accumulating rounding error. */
        if (pa_sw_volume_multiply(
                    i->reference_ratio.values[c],
                    remapped.values[c]) == i->volume.values[c])
            continue;

        i->reference_ratio.values[c] = pa_sw_volume_divide(
                i->volume.values[c],
                remapped.values[c]);
    }
}
1432
1433 /* Called from main context. Only called for the root sink in volume sharing
1434 * cases, except for internal recursive calls. */
1435 static void compute_reference_ratios(pa_sink *s) {
1436 uint32_t idx;
1437 pa_sink_input *i;
1438
1439 pa_sink_assert_ref(s);
1440 pa_assert_ctl_context();
1441 pa_assert(PA_SINK_IS_LINKED(s->state));
1442 pa_assert(pa_sink_flat_volume_enabled(s));
1443
1444 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1445 compute_reference_ratio(i);
1446
1447 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1448 compute_reference_ratios(i->origin_sink);
1449 }
1450 }
1451
/* Called from main context. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Recomputes every input's real ratio (stream volume relative to the
 * sink's real volume) and from it the soft volume actually applied while
 * mixing. Only meaningful in flat volume mode. */
static void compute_real_ratios(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin sink uses volume sharing, so this input's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result i->soft_volume must equal i->volume_factor. */
            pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
            i->soft_volume = i->volume_factor;

            compute_real_ratios(i->origin_sink);

            continue;
        }

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        /* Bring the sink's real volume into the stream's channel map. */
        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->real_ratio.channels = i->sample_spec.channels;
        i->soft_volume.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave i->real_ratio untouched */
                i->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary: keep the old ratio if
             * it still multiplies back to the stream volume exactly. */
            if (pa_sw_volume_multiply(
                        i->real_ratio.values[c],
                        remapped.values[c]) != i->volume.values[c])

                i->real_ratio.values[c] = pa_sw_volume_divide(
                        i->volume.values[c],
                        remapped.values[c]);

            i->soft_volume.values[c] = pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    i->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
1518
1519 static pa_cvolume *cvolume_remap_minimal_impact(
1520 pa_cvolume *v,
1521 const pa_cvolume *template,
1522 const pa_channel_map *from,
1523 const pa_channel_map *to) {
1524
1525 pa_cvolume t;
1526
1527 pa_assert(v);
1528 pa_assert(template);
1529 pa_assert(from);
1530 pa_assert(to);
1531 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1532 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1533
1534 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1535 * mapping from sink input to sink volumes:
1536 *
1537 * If template is a possible remapping from v it is used instead
1538 * of remapping anew.
1539 *
1540 * If the channel maps don't match we set an all-channel volume on
1541 * the sink to ensure that changing a volume on one stream has no
1542 * effect that cannot be compensated for in another stream that
1543 * does not have the same channel map as the sink. */
1544
1545 if (pa_channel_map_equal(from, to))
1546 return v;
1547
1548 t = *template;
1549 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1550 *v = *template;
1551 return v;
1552 }
1553
1554 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1555 return v;
1556 }
1557
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Merges the volumes of all inputs (recursing through volume-sharing
 * filter sinks) into *max_volume, which must be pre-initialized by the
 * caller. 'channel_map' is the map *max_volume is expressed in. */
static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(max_volume);
    pa_assert(channel_map);
    pa_assert(pa_sink_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume remapped;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            get_maximum_input_volume(i->origin_sink, max_volume, channel_map);

            /* Ignore this input. The origin sink uses volume sharing, so this
             * input's volume will be set to be equal to the root sink's real
             * volume. Obviously this input's current volume must not then
             * affect what the root sink's real volume will be. */
            continue;
        }

        /* Remap with minimal impact so the merge is comparable across
         * inputs with differing channel maps. */
        remapped = i->volume;
        cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
        pa_cvolume_merge(max_volume, max_volume, &remapped);
    }
}
1587
1588 /* Called from main thread. Only called for the root sink in volume sharing
1589 * cases, except for internal recursive calls. */
1590 static pa_bool_t has_inputs(pa_sink *s) {
1591 pa_sink_input *i;
1592 uint32_t idx;
1593
1594 pa_sink_assert_ref(s);
1595
1596 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1597 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1598 return TRUE;
1599 }
1600
1601 return FALSE;
1602 }
1603
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Sets s->real_volume to *new_volume (remapped from 'channel_map' into the
 * sink's own map) and propagates it down through volume-sharing filter
 * sinks, updating their inputs' volumes and reference ratios on the way. */
static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);

    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_sink_flat_volume_enabled(s)) {
                pa_cvolume old_volume = i->volume;

                /* Follow the root sink's real volume. */
                i->volume = *new_volume;
                pa_cvolume_remap(&i->volume, channel_map, &i->channel_map);
                compute_reference_ratio(i);

                /* The volume changed, let's tell people so */
                if (!pa_cvolume_equal(&old_volume, &i->volume)) {
                    if (i->volume_changed)
                        i->volume_changed(i);

                    pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
                }
            }

            /* Recurse into the filter sink behind this input. */
            update_real_volume(i->origin_sink, new_volume, channel_map);
        }
    }
}
1640
/* Called from main thread. Only called for the root sink in shared volume
 * cases.
 *
 * Derives the sink's real volume from the maximum of all stream volumes
 * and updates the inputs' real ratios/soft volumes accordingly. */
static void compute_real_volume(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (!has_inputs(s)) {
        /* In the special case that we have no sink inputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);
        return;
    }

    /* Start from mute so the merge below yields exactly the maximum. */
    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all inputs
     * connected to this sink */
    get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);

    /* Then, let's update the real ratios/soft volumes of all inputs
     * connected to this sink */
    compute_real_ratios(s);
}
1671
/* Called from main thread. Only called for the root sink in shared volume
 * cases, except for internal recursive calls.
 *
 * Pushes a changed sink reference volume back into the stream volumes,
 * keeping each stream's reference ratio fixed. */
static void propagate_reference_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));

    /* This is called whenever the sink volume changes that is not
     * caused by a sink input volume change. We need to fix up the
     * sink input volumes accordingly */

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume old_volume;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            propagate_reference_volume(i->origin_sink);

            /* Since the origin sink uses volume sharing, this input's volume
             * needs to be updated to match the root sink's real volume, but
             * that will be done later in update_shared_real_volume(). */
            continue;
        }

        old_volume = i->volume;

        /* This basically calculates:
         *
         * i->volume := s->reference_volume * i->reference_ratio */

        i->volume = s->reference_volume;
        pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
        pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);

        /* The volume changed, let's tell people so */
        if (!pa_cvolume_equal(&old_volume, &i->volume)) {

            if (i->volume_changed)
                i->volume_changed(i);

            pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
        }
    }
}
1719
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed.
 *
 * Sets the sink's reference volume to *v (remapped from 'channel_map'),
 * posts a change event if it changed, and propagates the new reference
 * volume down through volume-sharing filter sinks. */
static pa_bool_t update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
    pa_cvolume volume;
    pa_bool_t reference_volume_changed;
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(v);
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));

    volume = *v;
    pa_cvolume_remap(&volume, channel_map, &s->channel_map);

    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    s->reference_volume = volume;

    /* A no-op set must not clear a previously saved volume. */
    s->save_volume = (!reference_volume_changed && s->save_volume) || save;

    if (reference_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    else if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        /* If the root sink's volume doesn't change, then there can't be any
         * changes in the other sinks in the sink tree either.
         *
         * It's probably theoretically possible that even if the root sink's
         * volume changes slightly, some filter sink doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root sink volume to the sinks connected to the
         * intermediate sink that didn't change its volume. This theoretical
         * possibility is the reason why we have that !(s->flags &
         * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we returned here FALSE always if
         * reference_volume_changed is FALSE. */
        return FALSE;

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
            update_reference_volume(i->origin_sink, v, channel_map, FALSE);
    }

    return TRUE;
}
1767
/* Called from main thread.
 *
 * Sets the sink's volume (or, if 'volume' is NULL, resynchronizes the
 * sink's reference/real volumes with the stream volumes in flat volume
 * mode). For volume-sharing filter sinks the change is applied to the
 * root sink and propagated from there.
 *
 * volume   - new volume, or NULL to sync from streams (flat volume only)
 * send_msg - whether to notify the IO thread about the change
 * save     - whether the new volume should be saved persistently */
void pa_sink_set_volume(
        pa_sink *s,
        const pa_cvolume *volume,
        pa_bool_t send_msg,
        pa_bool_t save) {

    pa_cvolume new_reference_volume;
    pa_sink *root_sink = s;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_sink_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* make sure we don't change the volume when a PASSTHROUGH input is connected */
    if (pa_sink_is_passthrough(s)) {
        /* FIXME: Need to notify client that volume control is disabled */
        pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
        return;
    }

    /* In case of volume sharing, the volume is set for the root sink first,
     * from which it's then propagated to the sharing sinks. */
    while (root_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
        root_sink = root_sink->input_to_master->sink;

    /* As a special exception we accept mono volumes on all sinks --
     * even on those with more complex channel maps */

    if (volume) {
        if (pa_cvolume_compatible(volume, &s->sample_spec))
            new_reference_volume = *volume;
        else {
            /* Mono volume: scale the current reference volume so that its
             * maximum equals the requested level. */
            new_reference_volume = s->reference_volume;
            pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
        }

        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
    }

    /* If volume is NULL we synchronize the sink's real and reference
     * volumes with the stream volumes. If it is not NULL we update
     * the reference_volume with it. */

    if (volume) {
        if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
            if (pa_sink_flat_volume_enabled(root_sink)) {
                /* OK, propagate this volume change back to the inputs */
                propagate_reference_volume(root_sink);

                /* And now recalculate the real volume */
                compute_real_volume(root_sink);
            } else
                update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
        }

    } else {
        pa_assert(pa_sink_flat_volume_enabled(root_sink));

        /* Ok, let's determine the new real volume */
        compute_real_volume(root_sink);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
        update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);

        /* Now that the reference volume is updated, we can update the streams'
         * reference ratios. */
        compute_reference_ratios(root_sink);
    }

    if (root_sink->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_sink->soft_volume */

        pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
        if (!(root_sink->flags & PA_SINK_SYNC_VOLUME))
            root_sink->set_volume(root_sink);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the real volume */
        root_sink->soft_volume = root_sink->real_volume;

    /* This tells the sink that soft volume and/or real volume changed */
    if (send_msg)
        pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
}
1860
1861 /* Called from the io thread if sync volume is used, otherwise from the main thread.
1862 * Only to be called by sink implementor */
1863 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1864
1865 pa_sink_assert_ref(s);
1866 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
1867
1868 if (s->flags & PA_SINK_SYNC_VOLUME)
1869 pa_sink_assert_io_context(s);
1870 else
1871 pa_assert_ctl_context();
1872
1873 if (!volume)
1874 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1875 else
1876 s->soft_volume = *volume;
1877
1878 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_SYNC_VOLUME))
1879 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1880 else
1881 s->thread_info.soft_volume = s->soft_volume;
1882 }
1883
/* Called from the main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Handles an externally caused hardware volume change: adopts the new real
 * volume as reference volume and rebuilds the stream volumes from their
 * (unchanged) real ratios. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))
            return;

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
    }

    if (pa_sink_flat_volume_enabled(s)) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume old_volume = i->volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            i->reference_ratio = i->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            i->volume = s->reference_volume;
            pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &i->volume)) {

                if (i->volume_changed)
                    i->volume_changed(i);

                pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
            }

            /* Descend into volume-sharing filter sinks. */
            if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
                propagate_real_volume(i->origin_sink, old_real_volume);
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        s->save_volume = TRUE;
}
1949
/* Called from io thread.
 *
 * Asks the main thread (via the outbound message queue) to refresh this
 * sink's volume and mute state; returns without waiting for the result. */
void pa_sink_update_volume_and_mute(pa_sink *s) {
    pa_assert(s);
    pa_sink_assert_io_context(s);

    pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
}
1957
/* Called from main thread.
 *
 * Returns the sink's reference volume, optionally refreshing the real
 * volume from the hardware first (when the sink requests refreshes or
 * 'force_refresh' is set) and propagating any externally made change. */
const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_real_volume;

        pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

        old_real_volume = s->real_volume;

        /* Without sync volume the implementor callback runs here in the
         * main thread; with sync volume the IO thread handles it via the
         * message below. */
        if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->get_volume)
            s->get_volume(s);

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        update_real_volume(s, &s->real_volume, &s->channel_map);
        propagate_real_volume(s, &old_real_volume);
    }

    return &s->reference_volume;
}
1982
1983 /* Called from main thread. In volume sharing cases, only the root sink may
1984 * call this. */
1985 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1986 pa_cvolume old_real_volume;
1987
1988 pa_sink_assert_ref(s);
1989 pa_assert_ctl_context();
1990 pa_assert(PA_SINK_IS_LINKED(s->state));
1991 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
1992
1993 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1994
1995 old_real_volume = s->real_volume;
1996 update_real_volume(s, new_real_volume, &s->channel_map);
1997 propagate_real_volume(s, &old_real_volume);
1998 }
1999
/* Called from main thread */
void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
    pa_bool_t old_muted;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    old_muted = s->muted;
    s->muted = mute;
    /* Keep the save flag if nothing changed and it was set before,
     * otherwise take it from the caller. */
    s->save_muted = (old_muted == s->muted && s->save_muted) || save;

    /* Without deferred volume, apply the mute to the driver right here ... */
    if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->set_mute)
        s->set_mute(s);

    /* ... and in any case tell the IO thread, so that the soft mute state
     * stays in sync (deferred-volume sinks apply the hardware mute there). */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);

    if (old_muted != s->muted)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
2020
/* Called from main thread */
pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Re-read the mute state from the driver if the sink asks for polling
     * (refresh_muted) or the caller forces a refresh. */
    if (s->refresh_muted || force_refresh) {
        pa_bool_t old_muted = s->muted;

        /* Without deferred volume we may query the driver directly ... */
        if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->get_mute)
            s->get_mute(s);

        /* ... and in any case round-trip through the IO thread, which is
         * where deferred-volume sinks run their get_mute() callback. */
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);

        if (old_muted != s->muted) {
            /* A change detected this way was done behind our back,
             * presumably by the user, so remember it. */
            s->save_muted = TRUE;

            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

            /* Make sure the soft mute status stays in sync */
            pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
        }
    }

    return s->muted;
}
2048
2049 /* Called from main thread */
2050 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
2051 pa_sink_assert_ref(s);
2052 pa_assert_ctl_context();
2053 pa_assert(PA_SINK_IS_LINKED(s->state));
2054
2055 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2056
2057 if (s->muted == new_muted)
2058 return;
2059
2060 s->muted = new_muted;
2061 s->save_muted = TRUE;
2062
2063 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2064 }
2065
2066 /* Called from main thread */
2067 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2068 pa_sink_assert_ref(s);
2069 pa_assert_ctl_context();
2070
2071 if (p)
2072 pa_proplist_update(s->proplist, mode, p);
2073
2074 if (PA_SINK_IS_LINKED(s->state)) {
2075 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2076 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2077 }
2078
2079 return TRUE;
2080 }
2081
/* Called from main thread */
/* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
void pa_sink_set_description(pa_sink *s, const char *description) {
    const char *old;
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* NULL description with no existing property: nothing to do. */
    if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
        return;

    old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    /* No change, no notification. */
    if (old && description && pa_streq(old, description))
        return;

    if (description)
        pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
    else
        pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    /* Keep the monitor source's description in sync with ours. */
    if (s->monitor_source) {
        char *n;

        n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
        pa_source_set_description(s->monitor_source, n);
        pa_xfree(n);
    }

    if (PA_SINK_IS_LINKED(s->state)) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
    }
}
2115
2116 /* Called from main thread */
2117 unsigned pa_sink_linked_by(pa_sink *s) {
2118 unsigned ret;
2119
2120 pa_sink_assert_ref(s);
2121 pa_assert_ctl_context();
2122 pa_assert(PA_SINK_IS_LINKED(s->state));
2123
2124 ret = pa_idxset_size(s->inputs);
2125
2126 /* We add in the number of streams connected to us here. Please
2127 * note the asymmmetry to pa_sink_used_by()! */
2128
2129 if (s->monitor_source)
2130 ret += pa_source_linked_by(s->monitor_source);
2131
2132 return ret;
2133 }
2134
2135 /* Called from main thread */
2136 unsigned pa_sink_used_by(pa_sink *s) {
2137 unsigned ret;
2138
2139 pa_sink_assert_ref(s);
2140 pa_assert_ctl_context();
2141 pa_assert(PA_SINK_IS_LINKED(s->state));
2142
2143 ret = pa_idxset_size(s->inputs);
2144 pa_assert(ret >= s->n_corked);
2145
2146 /* Streams connected to our monitor source do not matter for
2147 * pa_sink_used_by()!.*/
2148
2149 return ret - s->n_corked;
2150 }
2151
2152 /* Called from main thread */
2153 unsigned pa_sink_check_suspend(pa_sink *s) {
2154 unsigned ret;
2155 pa_sink_input *i;
2156 uint32_t idx;
2157
2158 pa_sink_assert_ref(s);
2159 pa_assert_ctl_context();
2160
2161 if (!PA_SINK_IS_LINKED(s->state))
2162 return 0;
2163
2164 ret = 0;
2165
2166 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2167 pa_sink_input_state_t st;
2168
2169 st = pa_sink_input_get_state(i);
2170
2171 /* We do not assert here. It is perfectly valid for a sink input to
2172 * be in the INIT state (i.e. created, marked done but not yet put)
2173 * and we should not care if it's unlinked as it won't contribute
2174 * towarards our busy status.
2175 */
2176 if (!PA_SINK_INPUT_IS_LINKED(st))
2177 continue;
2178
2179 if (st == PA_SINK_INPUT_CORKED)
2180 continue;
2181
2182 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2183 continue;
2184
2185 ret ++;
2186 }
2187
2188 if (s->monitor_source)
2189 ret += pa_source_check_suspend(s->monitor_source);
2190
2191 return ret;
2192 }
2193
2194 /* Called from the IO thread */
2195 static void sync_input_volumes_within_thread(pa_sink *s) {
2196 pa_sink_input *i;
2197 void *state = NULL;
2198
2199 pa_sink_assert_ref(s);
2200 pa_sink_assert_io_context(s);
2201
2202 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2203 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2204 continue;
2205
2206 i->thread_info.soft_volume = i->soft_volume;
2207 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
2208 }
2209 }
2210
2211 /* Called from the IO thread. Only called for the root sink in volume sharing
2212 * cases, except for internal recursive calls. */
2213 static void set_shared_volume_within_thread(pa_sink *s) {
2214 pa_sink_input *i = NULL;
2215 void *state = NULL;
2216
2217 pa_sink_assert_ref(s);
2218
2219 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2220
2221 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2222 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2223 set_shared_volume_within_thread(i->origin_sink);
2224 }
2225 }
2226
/* Called from IO thread, except when it is not */
/* Generic message dispatcher for sinks: handles stream attach/detach and
 * moves, volume/mute synchronisation, state changes and the various
 * latency/rewind getters and setters. Sink implementations chain up to
 * this from their own process_msg(). Returns 0 on success, -1 for
 * unknown message codes. Note the deliberate fall-throughs in the
 * volume-related cases below. */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Hook the input into the IO-thread view of its sync chain. */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be
             * fixed up and then configured on the sink */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming. */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unhook the input from the IO-thread view of its sync chain. */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                /* Reclaim the data still queued for this input so it can
                 * be replayed on the new sink after the move. */
                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                /* Drop what is already queued in the new sink's pipeline
                 * so playback lines up after the move. */
                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
            pa_sink *root_sink = s;

            /* Walk up the filter-sink chain to the sink that actually
             * owns the shared volume. */
            while (root_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
                root_sink = root_sink->input_to_master->sink;

            set_shared_volume_within_thread(root_sink);
            return 0;
        }

        case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:

            if (s->flags & PA_SINK_SYNC_VOLUME) {
                s->set_volume(s);
                pa_sink_volume_change_push(s);
            }
            /* Fall through ... */

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:

            if ((s->flags & PA_SINK_SYNC_VOLUME) && s->get_volume) {
                s->get_volume(s);
                pa_sink_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
            }

            /* In case sink implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (s->flags & PA_SINK_SYNC_VOLUME && s->set_mute)
                s->set_mute(s);

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:

            if (s->flags & PA_SINK_SYNC_VOLUME && s->get_mute)
                s->get_mute(s);

            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* Detect transitions into and out of SUSPENDED so attached
             * inputs can be told about them below. */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            /* A suspended sink has no business keeping a rewind pending. */
            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_PORT:

            pa_assert(userdata);
            if (s->set_port) {
                struct sink_message_set_port *msg_data = userdata;
                msg_data->ret = s->set_port(s, msg_data->port);
            }
            return 0;

        case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            pa_sink_get_volume(s, TRUE);
            pa_sink_get_mute(s, TRUE);
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            ;
    }

    /* Unknown code, or one the sink implementation must handle itself. */
    return -1;
}
2603
2604 /* Called from main thread */
2605 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2606 pa_sink *sink;
2607 uint32_t idx;
2608 int ret = 0;
2609
2610 pa_core_assert_ref(c);
2611 pa_assert_ctl_context();
2612 pa_assert(cause != 0);
2613
2614 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2615 int r;
2616
2617 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2618 ret = r;
2619 }
2620
2621 return ret;
2622 }
2623
/* Called from main thread */
void pa_sink_detach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Synchronously ask the IO thread to detach all streams; see
     * pa_sink_detach_within_thread(). */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
2632
/* Called from main thread */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Synchronously ask the IO thread to reattach all streams; see
     * pa_sink_attach_within_thread(). */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2641
2642 /* Called from IO thread */
2643 void pa_sink_detach_within_thread(pa_sink *s) {
2644 pa_sink_input *i;
2645 void *state = NULL;
2646
2647 pa_sink_assert_ref(s);
2648 pa_sink_assert_io_context(s);
2649 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2650
2651 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2652 if (i->detach)
2653 i->detach(i);
2654
2655 if (s->monitor_source)
2656 pa_source_detach_within_thread(s->monitor_source);
2657 }
2658
2659 /* Called from IO thread */
2660 void pa_sink_attach_within_thread(pa_sink *s) {
2661 pa_sink_input *i;
2662 void *state = NULL;
2663
2664 pa_sink_assert_ref(s);
2665 pa_sink_assert_io_context(s);
2666 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2667
2668 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2669 if (i->attach)
2670 i->attach(i);
2671
2672 if (s->monitor_source)
2673 pa_source_attach_within_thread(s->monitor_source);
2674 }
2675
/* Called from IO thread */
void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* Rewinding is pointless while suspended. */
    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return;

    /* (size_t) -1 means "rewind as much as possible". */
    if (nbytes == (size_t) -1)
        nbytes = s->thread_info.max_rewind;

    nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);

    /* A rewind at least this large is already pending -- nothing to do. */
    if (s->thread_info.rewind_requested &&
        nbytes <= s->thread_info.rewind_nbytes)
        return;

    s->thread_info.rewind_nbytes = nbytes;
    s->thread_info.rewind_requested = TRUE;

    /* Let the implementation know, if it cares. */
    if (s->request_rewind)
        s->request_rewind(s);
}
2700
/* Called from IO thread */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Fixed-latency sinks simply report their (clamped) fixed latency. */
    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    /* Return the cached value if it is still valid. */
    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Otherwise take the minimum over all latencies requested by our
     * inputs ... */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* ... and by our monitor source. */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    /* -1 (nobody requested anything) is passed through unclamped. */
    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
2739
2740 /* Called from main thread */
2741 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2742 pa_usec_t usec = 0;
2743
2744 pa_sink_assert_ref(s);
2745 pa_assert_ctl_context();
2746 pa_assert(PA_SINK_IS_LINKED(s->state));
2747
2748 if (s->state == PA_SINK_SUSPENDED)
2749 return 0;
2750
2751 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2752
2753 return usec;
2754 }
2755
/* Called from IO as well as the main thread -- the latter only before the IO thread started up */
void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Nothing to do if the value is unchanged. */
    if (max_rewind == s->thread_info.max_rewind)
        return;

    s->thread_info.max_rewind = max_rewind;

    /* Propagate the new value to all attached inputs once linked ... */
    if (PA_SINK_IS_LINKED(s->thread_info.state))
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);

    /* ... and to the monitor source in any case. */
    if (s->monitor_source)
        pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
}
2776
2777 /* Called from main thread */
2778 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2779 pa_sink_assert_ref(s);
2780 pa_assert_ctl_context();
2781
2782 if (PA_SINK_IS_LINKED(s->state))
2783 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2784 else
2785 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2786 }
2787
2788 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2789 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2790 void *state = NULL;
2791
2792 pa_sink_assert_ref(s);
2793 pa_sink_assert_io_context(s);
2794
2795 if (max_request == s->thread_info.max_request)
2796 return;
2797
2798 s->thread_info.max_request = max_request;
2799
2800 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2801 pa_sink_input *i;
2802
2803 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2804 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2805 }
2806 }
2807
2808 /* Called from main thread */
2809 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2810 pa_sink_assert_ref(s);
2811 pa_assert_ctl_context();
2812
2813 if (PA_SINK_IS_LINKED(s->state))
2814 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2815 else
2816 pa_sink_set_max_request_within_thread(s, max_request);
2817 }
2818
/* Called from IO thread */
void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* For dynamic-latency sinks drop the cached requested latency. For
     * fixed-latency sinks there is no cache to invalidate, and if the
     * caller only cares about dynamic latency we can stop right here. */
    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = FALSE;
    else if (dynamic)
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        /* First let the implementation react ... */
        if (s->update_requested_latency)
            s->update_requested_latency(s);

        /* ... then tell every input that the sink's requested latency
         * may have changed. */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
2842
/* Called from main thread */
void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* min_latency == 0: no limit
     * min_latency anything else: specified limit
     *
     * Similar for max_latency */

    /* Clamp the requested range into the absolute bounds. */
    if (min_latency < ABSOLUTE_MIN_LATENCY)
        min_latency = ABSOLUTE_MIN_LATENCY;

    if (max_latency <= 0 ||
        max_latency > ABSOLUTE_MAX_LATENCY)
        max_latency = ABSOLUTE_MAX_LATENCY;

    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    /* Linked sinks are updated via the IO thread; unlinked ones may be
     * written directly since no IO thread is running yet. */
    if (PA_SINK_IS_LINKED(s->state)) {
        pa_usec_t r[2];

        r[0] = min_latency;
        r[1] = max_latency;

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
    } else
        pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
}
2877
2878 /* Called from main thread */
2879 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2880 pa_sink_assert_ref(s);
2881 pa_assert_ctl_context();
2882 pa_assert(min_latency);
2883 pa_assert(max_latency);
2884
2885 if (PA_SINK_IS_LINKED(s->state)) {
2886 pa_usec_t r[2] = { 0, 0 };
2887
2888 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2889
2890 *min_latency = r[0];
2891 *max_latency = r[1];
2892 } else {
2893 *min_latency = s->thread_info.min_latency;
2894 *max_latency = s->thread_info.max_latency;
2895 }
2896 }
2897
/* Called from IO thread */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Unlike pa_sink_set_latency_range(), the caller must already have
     * clamped the values into the absolute bounds. */
    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    /* Nothing to do if the range is unchanged. */
    if (s->thread_info.min_latency == min_latency &&
        s->thread_info.max_latency == max_latency)
        return;

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    /* Let every input know about the new range ... */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    /* ... drop the now-stale cached requested latency ... */
    pa_sink_invalidate_requested_latency(s, FALSE);

    /* ... and keep the monitor source in sync. */
    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
2932
/* Called from main thread */
void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Dynamic-latency sinks have no fixed latency; only latency == 0 is
     * acceptable for them. */
    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    /* Clamp into the absolute bounds. */
    if (latency < ABSOLUTE_MIN_LATENCY)
        latency = ABSOLUTE_MIN_LATENCY;

    if (latency > ABSOLUTE_MAX_LATENCY)
        latency = ABSOLUTE_MAX_LATENCY;

    /* Linked sinks are updated via the IO thread; unlinked ones may be
     * written directly since no IO thread is running yet. */
    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
    else
        s->thread_info.fixed_latency = latency;

    /* Keep the monitor source in sync. */
    pa_source_set_fixed_latency(s->monitor_source, latency);
}
2956
2957 /* Called from main thread */
2958 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2959 pa_usec_t latency;
2960
2961 pa_sink_assert_ref(s);
2962 pa_assert_ctl_context();
2963
2964 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2965 return 0;
2966
2967 if (PA_SINK_IS_LINKED(s->state))
2968 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2969 else
2970 latency = s->thread_info.fixed_latency;
2971
2972 return latency;
2973 }
2974
/* Called from IO thread */
void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Dynamic-latency sinks have no fixed latency; only latency == 0 is
     * acceptable for them. */
    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    /* Unlike pa_sink_set_fixed_latency(), the caller must already have
     * clamped the value into the absolute bounds. */
    pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(latency <= ABSOLUTE_MAX_LATENCY);

    /* Nothing to do if the value is unchanged. */
    if (s->thread_info.fixed_latency == latency)
        return;

    s->thread_info.fixed_latency = latency;

    /* Let every input know about the new fixed latency ... */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_fixed_latency)
                i->update_sink_fixed_latency(i);
    }

    /* ... drop the now-stale cached requested latency ... */
    pa_sink_invalidate_requested_latency(s, FALSE);

    /* ... and keep the monitor source in sync. */
    pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
}
3006
3007 /* Called from main context */
3008 size_t pa_sink_get_max_rewind(pa_sink *s) {
3009 size_t r;
3010 pa_assert_ctl_context();
3011 pa_sink_assert_ref(s);
3012
3013 if (!PA_SINK_IS_LINKED(s->state))
3014 return s->thread_info.max_rewind;
3015
3016 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3017
3018 return r;
3019 }
3020
3021 /* Called from main context */
3022 size_t pa_sink_get_max_request(pa_sink *s) {
3023 size_t r;
3024 pa_sink_assert_ref(s);
3025 pa_assert_ctl_context();
3026
3027 if (!PA_SINK_IS_LINKED(s->state))
3028 return s->thread_info.max_request;
3029
3030 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3031
3032 return r;
3033 }
3034
3035 /* Called from main context */
3036 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
3037 pa_device_port *port;
3038 int ret;
3039
3040 pa_sink_assert_ref(s);
3041 pa_assert_ctl_context();
3042
3043 if (!s->set_port) {
3044 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3045 return -PA_ERR_NOTIMPLEMENTED;
3046 }
3047
3048 if (!s->ports)
3049 return -PA_ERR_NOENTITY;
3050
3051 if (!(port = pa_hashmap_get(s->ports, name)))
3052 return -PA_ERR_NOENTITY;
3053
3054 if (s->active_port == port) {
3055 s->save_port = s->save_port || save;
3056 return 0;
3057 }
3058
3059 if (s->flags & PA_SINK_SYNC_VOLUME) {
3060 struct sink_message_set_port msg = { .port = port, .ret = 0 };
3061 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
3062 ret = msg.ret;
3063 }
3064 else
3065 ret = s->set_port(s, port);
3066
3067 if (ret < 0)
3068 return -PA_ERR_NOENTITY;
3069
3070 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3071
3072 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3073
3074 s->active_port = port;
3075 s->save_port = save;
3076
3077 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
3078
3079 return 0;
3080 }
3081
3082 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
3083 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3084
3085 pa_assert(p);
3086
3087 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3088 return TRUE;
3089
3090 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3091
3092 if (pa_streq(ff, "microphone"))
3093 t = "audio-input-microphone";
3094 else if (pa_streq(ff, "webcam"))
3095 t = "camera-web";
3096 else if (pa_streq(ff, "computer"))
3097 t = "computer";
3098 else if (pa_streq(ff, "handset"))
3099 t = "phone";
3100 else if (pa_streq(ff, "portable"))
3101 t = "multimedia-player";
3102 else if (pa_streq(ff, "tv"))
3103 t = "video-display";
3104
3105 /*
3106 * The following icons are not part of the icon naming spec,
3107 * because Rodney Dawes sucks as the maintainer of that spec.
3108 *
3109 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3110 */
3111 else if (pa_streq(ff, "headset"))
3112 t = "audio-headset";
3113 else if (pa_streq(ff, "headphone"))
3114 t = "audio-headphones";
3115 else if (pa_streq(ff, "speaker"))
3116 t = "audio-speakers";
3117 else if (pa_streq(ff, "hands-free"))
3118 t = "audio-handsfree";
3119 }
3120
3121 if (!t)
3122 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3123 if (pa_streq(c, "modem"))
3124 t = "modem";
3125
3126 if (!t) {
3127 if (is_sink)
3128 t = "audio-card";
3129 else
3130 t = "audio-input-microphone";
3131 }
3132
3133 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3134 if (strstr(profile, "analog"))
3135 s = "-analog";
3136 else if (strstr(profile, "iec958"))
3137 s = "-iec958";
3138 else if (strstr(profile, "hdmi"))
3139 s = "-hdmi";
3140 }
3141
3142 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3143
3144 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3145
3146 return TRUE;
3147 }
3148
3149 pa_bool_t pa_device_init_description(pa_proplist *p) {
3150 const char *s, *d = NULL, *k;
3151 pa_assert(p);
3152
3153 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3154 return TRUE;
3155
3156 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3157 if (pa_streq(s, "internal"))
3158 d = _("Internal Audio");
3159
3160 if (!d)
3161 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3162 if (pa_streq(s, "modem"))
3163 d = _("Modem");
3164
3165 if (!d)
3166 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3167
3168 if (!d)
3169 return FALSE;
3170
3171 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3172
3173 if (d && k)
3174 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
3175 else if (d)
3176 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3177
3178 return TRUE;
3179 }
3180
3181 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
3182 const char *s;
3183 pa_assert(p);
3184
3185 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3186 return TRUE;
3187
3188 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3189 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3190 || pa_streq(s, "headset")) {
3191 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3192 return TRUE;
3193 }
3194
3195 return FALSE;
3196 }
3197
3198 unsigned pa_device_init_priority(pa_proplist *p) {
3199 const char *s;
3200 unsigned priority = 0;
3201
3202 pa_assert(p);
3203
3204 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3205
3206 if (pa_streq(s, "sound"))
3207 priority += 9000;
3208 else if (!pa_streq(s, "modem"))
3209 priority += 1000;
3210 }
3211
3212 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3213
3214 if (pa_streq(s, "internal"))
3215 priority += 900;
3216 else if (pa_streq(s, "speaker"))
3217 priority += 500;
3218 else if (pa_streq(s, "headphone"))
3219 priority += 400;
3220 }
3221
3222 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3223
3224 if (pa_streq(s, "pci"))
3225 priority += 50;
3226 else if (pa_streq(s, "usb"))
3227 priority += 40;
3228 else if (pa_streq(s, "bluetooth"))
3229 priority += 30;
3230 }
3231
3232 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3233
3234 if (pa_startswith(s, "analog-"))
3235 priority += 9;
3236 else if (pa_startswith(s, "iec958-"))
3237 priority += 8;
3238 }
3239
3240 return priority;
3241 }
3242
3243 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3244
/* Called from the IO thread. */
/* Allocate a volume change entry, reusing one from the lock-free free
 * list when available. The entry's list links and timestamp are reset and
 * its hw_volume is initialized for the sink's channel count. */
static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
    pa_sink_volume_change *c;
    if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
        c = pa_xnew(pa_sink_volume_change, 1);

    PA_LLIST_INIT(pa_sink_volume_change, c);
    c->at = 0;
    pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
    return c;
}
3256
/* Called from the IO thread. */
/* Return a volume change entry to the free list for reuse; if the free
 * list is full, release it to the heap instead. */
static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
    pa_assert(c);
    if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
        pa_xfree(c);
}
3263
3264 /* Called from the IO thread. */
3265 void pa_sink_volume_change_push(pa_sink *s) {
3266 pa_sink_volume_change *c = NULL;
3267 pa_sink_volume_change *nc = NULL;
3268 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3269
3270 const char *direction = NULL;
3271
3272 pa_assert(s);
3273 nc = pa_sink_volume_change_new(s);
3274
3275 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3276 * Adding one more volume for HW would get us rid of this, but I am trying
3277 * to survive with the ones we already have. */
3278 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
3279
3280 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3281 pa_log_debug("Volume not changing");
3282 pa_sink_volume_change_free(nc);
3283 return;
3284 }
3285
3286 nc->at = pa_sink_get_latency_within_thread(s);
3287 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3288
3289 if (s->thread_info.volume_changes_tail) {
3290 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3291 /* If volume is going up let's do it a bit late. If it is going
3292 * down let's do it a bit early. */
3293 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3294 if (nc->at + safety_margin > c->at) {
3295 nc->at += safety_margin;
3296 direction = "up";
3297 break;
3298 }
3299 }
3300 else if (nc->at - safety_margin > c->at) {
3301 nc->at -= safety_margin;
3302 direction = "down";
3303 break;
3304 }
3305 }
3306 }
3307
3308 if (c == NULL) {
3309 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3310 nc->at += safety_margin;
3311 direction = "up";
3312 } else {
3313 nc->at -= safety_margin;
3314 direction = "down";
3315 }
3316 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3317 }
3318 else {
3319 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3320 }
3321
3322 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3323
3324 /* We can ignore volume events that came earlier but should happen later than this. */
3325 PA_LLIST_FOREACH(c, nc->next) {
3326 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3327 pa_sink_volume_change_free(c);
3328 }
3329 nc->next = NULL;
3330 s->thread_info.volume_changes_tail = nc;
3331 }
3332
3333 /* Called from the IO thread. */
3334 static void pa_sink_volume_change_flush(pa_sink *s) {
3335 pa_sink_volume_change *c = s->thread_info.volume_changes;
3336 pa_assert(s);
3337 s->thread_info.volume_changes = NULL;
3338 s->thread_info.volume_changes_tail = NULL;
3339 while (c) {
3340 pa_sink_volume_change *next = c->next;
3341 pa_sink_volume_change_free(c);
3342 c = next;
3343 }
3344 }
3345
3346 /* Called from the IO thread. */
3347 pa_bool_t pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
3348 pa_usec_t now = pa_rtclock_now();
3349 pa_bool_t ret = FALSE;
3350
3351 pa_assert(s);
3352 pa_assert(s->write_volume);
3353
3354 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
3355 pa_sink_volume_change *c = s->thread_info.volume_changes;
3356 PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
3357 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3358 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
3359 ret = TRUE;
3360 s->thread_info.current_hw_volume = c->hw_volume;
3361 pa_sink_volume_change_free(c);
3362 }
3363
3364 if (s->write_volume && ret)
3365 s->write_volume(s);
3366
3367 if (s->thread_info.volume_changes) {
3368 if (usec_to_next)
3369 *usec_to_next = s->thread_info.volume_changes->at - now;
3370 if (pa_log_ratelimit(PA_LOG_DEBUG))
3371 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
3372 }
3373 else {
3374 if (usec_to_next)
3375 *usec_to_next = 0;
3376 s->thread_info.volume_changes_tail = NULL;
3377 }
3378 return ret;
3379 }
3380
/* Called from the IO thread. */
/* After a rewind of 'nbytes', shift queued volume changes earlier by the
 * rewound duration so they still line up with the (now earlier) audio
 * they belong to, then immediately apply anything that became due. */
static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
    /* All the queued volume events later than current latency are shifted to happen earlier. */
    pa_sink_volume_change *c;
    pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
    pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
    pa_usec_t limit = pa_sink_get_latency_within_thread(s);

    pa_log_debug("latency = %lld", (long long) limit);
    limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
        pa_usec_t modified_limit = limit;
        /* Mirror the safety margin used in _push(): decreases may fire a
         * bit early, increases a bit late */
        if (prev_vol > pa_cvolume_avg(&c->hw_volume))
            modified_limit -= s->thread_info.volume_change_safety_margin;
        else
            modified_limit += s->thread_info.volume_change_safety_margin;
        /* Shift events beyond the limit earlier, clamping at the limit */
        if (c->at > modified_limit) {
            c->at -= rewound;
            if (c->at < modified_limit)
                c->at = modified_limit;
        }
        prev_vol = pa_cvolume_avg(&c->hw_volume);
    }
    pa_sink_volume_change_apply(s, NULL);
}
3407
3408 /* Called from the main thread */
3409 /* Gets the list of formats supported by the sink. The members and idxset must
3410 * be freed by the caller. */
3411 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3412 pa_idxset *ret;
3413
3414 pa_assert(s);
3415
3416 if (s->get_formats) {
3417 /* Sink supports format query, all is good */
3418 ret = s->get_formats(s);
3419 } else {
3420 /* Sink doesn't support format query, so assume it does PCM */
3421 pa_format_info *f = pa_format_info_new();
3422 f->encoding = PA_ENCODING_PCM;
3423
3424 ret = pa_idxset_new(NULL, NULL);
3425 pa_idxset_put(ret, f, NULL);
3426 }
3427
3428 return ret;
3429 }
3430
3431 /* Called from the main thread */
3432 /* Checks if the sink can accept this format */
3433 pa_bool_t pa_sink_check_format(pa_sink *s, pa_format_info *f)
3434 {
3435 pa_idxset *formats = NULL;
3436 pa_bool_t ret = FALSE;
3437
3438 pa_assert(s);
3439 pa_assert(f);
3440
3441 formats = pa_sink_get_formats(s);
3442
3443 if (formats) {
3444 pa_format_info *finfo_device;
3445 uint32_t i;
3446
3447 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3448 if (pa_format_info_is_compatible(finfo_device, f)) {
3449 ret = TRUE;
3450 break;
3451 }
3452 }
3453
3454 pa_idxset_free(formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
3455 }
3456
3457 return ret;
3458 }
3459
3460 /* Called from the main thread */
3461 /* Calculates the intersection between formats supported by the sink and
3462 * in_formats, and returns these, in the order of the sink's formats. */
3463 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3464 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3465 pa_format_info *f_sink, *f_in;
3466 uint32_t i, j;
3467
3468 pa_assert(s);
3469
3470 if (!in_formats || pa_idxset_isempty(in_formats))
3471 goto done;
3472
3473 sink_formats = pa_sink_get_formats(s);
3474
3475 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3476 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3477 if (pa_format_info_is_compatible(f_sink, f_in))
3478 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3479 }
3480 }
3481
3482 done:
3483 if (sink_formats)
3484 pa_idxset_free(sink_formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
3485
3486 return out_formats;
3487 }