Source: code.delx.au mirror of the PulseAudio repository — src/pulsecore/sink.c
Commit: "sink, source: Send notifications when flags change"
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/format.h>
33 #include <pulse/utf8.h>
34 #include <pulse/xmalloc.h>
35 #include <pulse/timeval.h>
36 #include <pulse/util.h>
37 #include <pulse/rtclock.h>
38 #include <pulse/internal.h>
39
40 #include <pulsecore/i18n.h>
41 #include <pulsecore/sink-input.h>
42 #include <pulsecore/namereg.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/mix.h>
46 #include <pulsecore/core-subscribe.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/play-memblockq.h>
50 #include <pulsecore/flist.h>
51
52 #include "sink.h"
53
54 #define MAX_MIX_CHANNELS 32
55 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
56 #define ABSOLUTE_MIN_LATENCY (500)
57 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
58 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
59
60 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
61
/* One pending hardware volume change, queued on the sink's IO-thread
 * list (thread_info.volume_changes) to be applied at a given time.
 * Used by the deferred-volume machinery (PA_SINK_DEFERRED_VOLUME). */
struct pa_sink_volume_change {
    pa_usec_t at;          /* time at which this change shall take effect */
    pa_cvolume hw_volume;  /* hardware volume to apply at that time */

    PA_LLIST_FIELDS(pa_sink_volume_change);
};
68
/* Payload for a PA_SINK_MESSAGE_SET_PORT message, used when the port
 * switch must run in the IO thread (deferred volume); "ret" carries the
 * result of the set_port() callback back to the sender. */
struct sink_message_set_port {
    pa_device_port *port;  /* port to activate */
    int ret;               /* out: result of the set_port() call */
};
73
74 static void sink_free(pa_object *s);
75
76 static void pa_sink_volume_change_push(pa_sink *s);
77 static void pa_sink_volume_change_flush(pa_sink *s);
78 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
79
80 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
81 pa_assert(data);
82
83 pa_zero(*data);
84 data->proplist = pa_proplist_new();
85 data->ports = pa_hashmap_new(pa_idxset_string_hash_func, pa_idxset_string_compare_func);
86
87 return data;
88 }
89
90 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
91 pa_assert(data);
92
93 pa_xfree(data->name);
94 data->name = pa_xstrdup(name);
95 }
96
97 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
98 pa_assert(data);
99
100 if ((data->sample_spec_is_set = !!spec))
101 data->sample_spec = *spec;
102 }
103
104 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
105 pa_assert(data);
106
107 if ((data->channel_map_is_set = !!map))
108 data->channel_map = *map;
109 }
110
111 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
112 pa_assert(data);
113
114 data->alternate_sample_rate_is_set = TRUE;
115 data->alternate_sample_rate = alternate_sample_rate;
116 }
117
118 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
119 pa_assert(data);
120
121 if ((data->volume_is_set = !!volume))
122 data->volume = *volume;
123 }
124
125 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
126 pa_assert(data);
127
128 data->muted_is_set = TRUE;
129 data->muted = !!mute;
130 }
131
132 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
133 pa_assert(data);
134
135 pa_xfree(data->active_port);
136 data->active_port = pa_xstrdup(port);
137 }
138
139 void pa_sink_new_data_done(pa_sink_new_data *data) {
140 pa_assert(data);
141
142 pa_proplist_free(data->proplist);
143
144 if (data->ports)
145 pa_hashmap_free(data->ports, (pa_free_cb_t) pa_device_port_unref);
146
147 pa_xfree(data->name);
148 pa_xfree(data->active_port);
149 }
150
151
152 /* Called from main context */
153 static void reset_callbacks(pa_sink *s) {
154 pa_assert(s);
155
156 s->set_state = NULL;
157 s->get_volume = NULL;
158 s->set_volume = NULL;
159 s->write_volume = NULL;
160 s->get_mute = NULL;
161 s->set_mute = NULL;
162 s->request_rewind = NULL;
163 s->update_requested_latency = NULL;
164 s->set_port = NULL;
165 s->get_formats = NULL;
166 s->set_formats = NULL;
167 s->update_rate = NULL;
168 }
169
/* Called from main context */
/* Allocate, validate and initialize a new sink from the "data" template,
 * and create its accompanying monitor source. On success the sink is
 * registered in the core's namereg and sink idxset but remains in
 * PA_SINK_INIT state: the caller must finish setup (asyncmsgq, callbacks,
 * ...) and then call pa_sink_put(). Returns NULL on failure. */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* Reserve the name first; the registry may return an adjusted name */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    /* Let modules veto or adjust the new sink's parameters */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */
    /* NOTE(review): the pa_return_null_if_fail() checks below leak "s"
     * and leave the namereg entry registered when they trip. */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Derive a default channel map from the channel count if none given */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    /* FIXME: There should probably be a general function for checking whether
     * the sink volume is allowed to be set, like there is for sink inputs. */
    pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    if (!data->volume_is_set) {
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);
        data->save_volume = FALSE;
    }

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit properties from the owning card, if any */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to adjust the now-fixed parameters */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = data->suspend_cause;
    pa_sink_set_mixer_dirty(s, FALSE);
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;
    s->default_sample_rate = s->sample_spec.rate;

    if (data->alternate_sample_rate_is_set)
        s->alternate_sample_rate = data->alternate_sample_rate;
    else
        s->alternate_sample_rate = s->core->alternate_sample_rate;

    /* An alternate rate equal to the default rate is pointless: disable it */
    if (s->sample_spec.rate == s->alternate_sample_rate) {
        pa_log_warn("Default and alternate sample rates are the same.");
        s->alternate_sample_rate = 0;
    }

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;
    s->input_to_master = NULL;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    /* Prefer the explicitly requested port, if it exists */
    if (data->active_port)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* Otherwise fall back to the highest-priority port */
    if (!s->active_port) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    if (s->active_port)
        s->latency_offset = s->active_port->latency_offset;
    else
        s->latency_offset = 0;

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* Initialize the IO-thread-private mirror of the sink state */
    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    /* Deferred (timed) hardware volume change bookkeeping */
    PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
    s->thread_info.volume_changes_tail = NULL;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
    s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
    s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
    s->thread_info.latency_offset = s->latency_offset;

    /* FIXME: This should probably be moved to pa_sink_put() */
    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Every sink gets a monitor source mirroring its output; it shares
     * our sample spec, channel map and latency behaviour */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    /* Keep the monitor's latency configuration in sync with ours */
    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
398
/* Called from main context */
/* Transition the sink into "state": lets the implementor react via
 * set_state(), synchronously propagates the state to the IO thread,
 * fires hooks/subscription events, and notifies inputs on
 * suspend/resume. Returns 0 on success or a negative error code. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* Are we crossing the suspended/opened boundary in either direction? */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    /* Push the new state into the IO thread synchronously */
    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* Roll back the implementor's state change on failure */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
456
457 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
458 pa_assert(s);
459
460 s->get_volume = cb;
461 }
462
463 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
464 pa_sink_flags_t flags;
465
466 pa_assert(s);
467 pa_assert(!s->write_volume || cb);
468
469 s->set_volume = cb;
470
471 /* Save the current flags so we can tell if they've changed */
472 flags = s->flags;
473
474 if (cb) {
475 /* The sink implementor is responsible for setting decibel volume support */
476 s->flags |= PA_SINK_HW_VOLUME_CTRL;
477 } else {
478 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
479 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
480 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
481 }
482
483 /* If the flags have changed after init, let any clients know via a change event */
484 if (s->state != PA_SINK_INIT && flags != s->flags)
485 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
486 }
487
488 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
489 pa_sink_flags_t flags;
490
491 pa_assert(s);
492 pa_assert(!cb || s->set_volume);
493
494 s->write_volume = cb;
495
496 /* Save the current flags so we can tell if they've changed */
497 flags = s->flags;
498
499 if (cb)
500 s->flags |= PA_SINK_DEFERRED_VOLUME;
501 else
502 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
503
504 /* If the flags have changed after init, let any clients know via a change event */
505 if (s->state != PA_SINK_INIT && flags != s->flags)
506 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
507 }
508
509 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
510 pa_assert(s);
511
512 s->get_mute = cb;
513 }
514
515 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
516 pa_sink_flags_t flags;
517
518 pa_assert(s);
519
520 s->set_mute = cb;
521
522 /* Save the current flags so we can tell if they've changed */
523 flags = s->flags;
524
525 if (cb)
526 s->flags |= PA_SINK_HW_MUTE_CTRL;
527 else
528 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
529
530 /* If the flags have changed after init, let any clients know via a change event */
531 if (s->state != PA_SINK_INIT && flags != s->flags)
532 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
533 }
534
535 static void enable_flat_volume(pa_sink *s, pa_bool_t enable) {
536 pa_sink_flags_t flags;
537
538 pa_assert(s);
539
540 /* Always follow the overall user preference here */
541 enable = enable && s->core->flat_volumes;
542
543 /* Save the current flags so we can tell if they've changed */
544 flags = s->flags;
545
546 if (enable)
547 s->flags |= PA_SINK_FLAT_VOLUME;
548 else
549 s->flags &= ~PA_SINK_FLAT_VOLUME;
550
551 /* If the flags have changed after init, let any clients know via a change event */
552 if (s->state != PA_SINK_INIT && flags != s->flags)
553 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
554 }
555
556 void pa_sink_enable_decibel_volume(pa_sink *s, pa_bool_t enable) {
557 pa_sink_flags_t flags;
558
559 pa_assert(s);
560
561 /* Save the current flags so we can tell if they've changed */
562 flags = s->flags;
563
564 if (enable) {
565 s->flags |= PA_SINK_DECIBEL_VOLUME;
566 enable_flat_volume(s, TRUE);
567 } else {
568 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
569 enable_flat_volume(s, FALSE);
570 }
571
572 /* If the flags have changed after init, let any clients know via a change event */
573 if (s->state != PA_SINK_INIT && flags != s->flags)
574 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
575 }
576
/* Called from main context */
/* Complete sink initialization after pa_sink_new(): verify the flag and
 * callback invariants, finalize volume flags, inherit volumes from the
 * master for volume-sharing sinks, move to SUSPENDED or IDLE state, put
 * the monitor source, and announce the new sink to clients and hooks. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || s->input_to_master);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow some volume related flags to be set
     * between _new() and _put() by the callback setter functions above.
     *
     * Thus we implement a couple safeguards here which ensure the above
     * setters were used (or at least the implementor made manual changes
     * in a compatible way).
     *
     * Note: All of these flags set here can change over the life time
     * of the sink. */
    pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
    pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
    pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);

    /* XXX: Currently decibel volume is disabled for all sinks that use volume
     * sharing. When the master sink supports decibel volume, it would be good
     * to have the flag also in the filter sink, but currently we don't do that
     * so that the flags of the filter sink never change when it's moved from
     * a master sink to another. One solution for this problem would be to
     * remove user-visible volume altogether from filter sinks when volume
     * sharing is used, but the current approach was easier to implement... */
    /* We always support decibel volumes in software, otherwise we leave it to
     * the sink implementor to set this flag as needed.
     *
     * Note: This flag can also change over the life time of the sink. */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        pa_sink_enable_decibel_volume(s, TRUE);

    /* If the sink implementor support DB volumes by itself, we should always
     * try and enable flat volumes too */
    if ((s->flags & PA_SINK_DECIBEL_VOLUME))
        enable_flat_volume(s, TRUE);

    if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
        pa_sink *root_sink = pa_sink_get_master(s);

        pa_assert(root_sink);

        /* Volume-sharing sinks mirror the master's volumes, remapped to
         * this sink's channel map */
        s->reference_volume = root_sink->reference_volume;
        pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);

        s->real_volume = root_sink->real_volume;
        pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
    } else
        /* We assume that if the sink implementor changed the default
         * volume he did so in real_volume, because that is the usual
         * place where he is supposed to place his changes.  */
        s->reference_volume = s->real_volume;

    /* Refresh the IO-thread mirror of the (possibly updated) volumes */
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);

    /* Consistency checks: base volume/steps, latency flags, and that the
     * monitor source's latency setup matches ours */
    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
              || (s->base_volume == PA_VOLUME_NORM
                  && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    /* Enter the initial state: suspended if something requested it, idle otherwise */
    if (s->suspend_cause)
        pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED) == 0);
    else
        pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
664
/* Called from main context */
/* Detach the sink from the core: unregister its name, remove it from
 * the core/card sink sets, kill all inputs, move to UNLINKED state,
 * clear the callbacks, unlink the monitor source and fire the removal
 * events. Safe to call multiple times. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Kill every remaining input; "j" guards against an input that
     * refuses to go away, which would otherwise loop forever */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
714
/* Called from main context */
/* Final destructor, invoked via the msgobject free hook once the
 * refcount hits zero. Ensures the sink is unlinked, then releases the
 * monitor source, input containers, silence block, strings, proplist
 * and ports, and finally the sink structure itself. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    /* Main-thread idxset holds no refs; IO-thread hashmap does */
    pa_idxset_free(s->inputs, NULL);
    pa_hashmap_free(s->thread_info.inputs, (pa_free_cb_t) pa_sink_input_unref);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports)
        pa_hashmap_free(s->ports, (pa_free_cb_t) pa_device_port_unref);

    pa_xfree(s);
}
750
751 /* Called from main context, and not while the IO thread is active, please */
752 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
753 pa_sink_assert_ref(s);
754 pa_assert_ctl_context();
755
756 s->asyncmsgq = q;
757
758 if (s->monitor_source)
759 pa_source_set_asyncmsgq(s->monitor_source, q);
760 }
761
/* Called from main context, and not while the IO thread is active, please */
/* Change a restricted subset of the sink's flags (only LATENCY and
 * DYNAMIC_LATENCY may be touched). On an actual change: logs the
 * transition, posts a change event, fires the FLAGS_CHANGED hook,
 * mirrors the change onto the monitor source, and propagates it
 * recursively to the origin sinks of any filter inputs. */
void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
    pa_sink_flags_t old_flags;
    pa_sink_input *input;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* For now, allow only a minimal set of flags to be changed. */
    pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);

    old_flags = s->flags;
    s->flags = (s->flags & ~mask) | (value & mask);

    if (s->flags == old_flags)
        return;

    if ((s->flags & PA_SINK_LATENCY) != (old_flags & PA_SINK_LATENCY))
        pa_log_debug("Sink %s: LATENCY flag %s.", s->name, (s->flags & PA_SINK_LATENCY) ? "enabled" : "disabled");

    if ((s->flags & PA_SINK_DYNAMIC_LATENCY) != (old_flags & PA_SINK_DYNAMIC_LATENCY))
        pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
                     s->name, (s->flags & PA_SINK_DYNAMIC_LATENCY) ? "enabled" : "disabled");

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_FLAGS_CHANGED], s);

    /* The monitor source's latency flags track ours */
    if (s->monitor_source)
        pa_source_update_flags(s->monitor_source,
                               ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                               ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
                               ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                               ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    /* Filter sinks stacked on top of us inherit the same change */
    PA_IDXSET_FOREACH(input, s->inputs, idx) {
        if (input->origin_sink)
            pa_sink_update_flags(input->origin_sink, mask, value);
    }
}
802
803 /* Called from IO context, or before _put() from main context */
804 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
805 pa_sink_assert_ref(s);
806 pa_sink_assert_io_context(s);
807
808 s->thread_info.rtpoll = p;
809
810 if (s->monitor_source)
811 pa_source_set_rtpoll(s->monitor_source, p);
812 }
813
814 /* Called from main context */
815 int pa_sink_update_status(pa_sink*s) {
816 pa_sink_assert_ref(s);
817 pa_assert_ctl_context();
818 pa_assert(PA_SINK_IS_LINKED(s->state));
819
820 if (s->state == PA_SINK_SUSPENDED)
821 return 0;
822
823 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
824 }
825
826 /* Called from any context - must be threadsafe */
827 void pa_sink_set_mixer_dirty(pa_sink *s, pa_bool_t is_dirty)
828 {
829 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
830 }
831
/* Called from main context */
/* Add or remove "cause" from the sink's (and its monitor's) suspend
 * cause mask and change the sink state accordingly. If the mixer was
 * flagged dirty while the session was inaccessible, re-apply the port,
 * mute and volume settings as soon as the session suspend cause clears.
 * Returns 0 or sink_set_state()'s error code. */
int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (suspend) {
        s->suspend_cause |= cause;
        s->monitor_source->suspend_cause |= cause;
    } else {
        s->suspend_cause &= ~cause;
        s->monitor_source->suspend_cause &= ~cause;
    }

    if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
        /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
           it'll be handled just fine. */
        pa_sink_set_mixer_dirty(s, FALSE);
        pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
        if (s->active_port && s->set_port) {
            if (s->flags & PA_SINK_DEFERRED_VOLUME) {
                /* Deferred volume: the port switch must run in the IO thread */
                struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
                pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
            }
            else
                s->set_port(s, s->active_port);
        }
        else {
            /* No port to restore: push mute and volume to the hardware directly */
            if (s->set_mute)
                s->set_mute(s);
            if (s->set_volume)
                s->set_volume(s);
        }
    }

    /* Nothing to do if the state already matches the cause mask */
    if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return sink_set_state(s, PA_SINK_SUSPENDED);
    else
        return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
878
879 /* Called from main context */
880 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
881 pa_sink_input *i, *n;
882 uint32_t idx;
883
884 pa_sink_assert_ref(s);
885 pa_assert_ctl_context();
886 pa_assert(PA_SINK_IS_LINKED(s->state));
887
888 if (!q)
889 q = pa_queue_new();
890
891 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
892 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
893
894 pa_sink_input_ref(i);
895
896 if (pa_sink_input_start_move(i) >= 0)
897 pa_queue_push(q, i);
898 else
899 pa_sink_input_unref(i);
900 }
901
902 return q;
903 }
904
905 /* Called from main context */
906 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
907 pa_sink_input *i;
908
909 pa_sink_assert_ref(s);
910 pa_assert_ctl_context();
911 pa_assert(PA_SINK_IS_LINKED(s->state));
912 pa_assert(q);
913
914 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
915 if (pa_sink_input_finish_move(i, s, save) < 0)
916 pa_sink_input_fail_move(i);
917
918 pa_sink_input_unref(i);
919 }
920
921 pa_queue_free(q, NULL);
922 }
923
924 /* Called from main context */
925 void pa_sink_move_all_fail(pa_queue *q) {
926 pa_sink_input *i;
927
928 pa_assert_ctl_context();
929 pa_assert(q);
930
931 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
932 pa_sink_input_fail_move(i);
933 pa_sink_input_unref(i);
934 }
935
936 pa_queue_free(q, NULL);
937 }
938
939 /* Called from IO thread context */
940 size_t pa_sink_process_input_underruns(pa_sink *s, size_t left_to_play) {
941 pa_sink_input *i;
942 void *state = NULL;
943 size_t result = 0;
944
945 pa_sink_assert_ref(s);
946 pa_sink_assert_io_context(s);
947
948 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
949 size_t uf = i->thread_info.underrun_for_sink;
950 if (uf == 0)
951 continue;
952 if (uf >= left_to_play) {
953 if (pa_sink_input_process_underrun(i))
954 continue;
955 }
956 else if (uf > result)
957 result = uf;
958 }
959
960 if (result > 0)
961 pa_log_debug("Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", (long) result, (long) left_to_play - result);
962 return left_to_play - result;
963 }
964
/* Called from IO thread context */
/* Execute a rewind of "nbytes" in the render pipeline: rewind pending
 * deferred volume changes, tell every input to rewind, and finally
 * rewind the monitor source. A zero-length call is only forwarded if a
 * rewind was actually requested (inputs may rely on the callback). */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* If nobody requested this and this is actually no real rewind
     * then we can short cut this. Please note that this means that
     * not all rewind requests triggered upstream will always be
     * translated in actual requests! */
    if (!s->thread_info.rewind_requested && nbytes <= 0)
        return;

    /* Reset the request before processing so a new one can be queued */
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;

    if (nbytes > 0) {
        pa_log_debug("Processing rewind...");
        /* Pending timed hw volume changes must be rolled back too */
        if (s->flags & PA_SINK_DEFERRED_VOLUME)
            pa_sink_volume_change_rewind(s, nbytes);
    }

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_process_rewind(i, nbytes);
    }

    if (nbytes > 0) {
        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
            pa_source_process_rewind(s->monitor_source, nbytes);
    }
}
1000
/* Called from IO thread context.
 *
 * Peeks up to maxinfo chunks of audio, one per (non-silent) sink input,
 * into the info[] array. On entry *length is the requested byte count; on
 * exit it is clamped down to the shortest chunk peeked, so all entries can
 * be mixed over a common length. Returns the number of entries filled in.
 * Every returned entry holds a reference on its memblock and (in userdata)
 * on its sink input; inputs_drop() releases both. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        /* Track the shortest chunk seen so far; 0 means "none yet". */
        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Pure silence need not be mixed in; drop the block reference and
         * skip the entry. (The chunk still contributed to mixlength above.) */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        /* Remember which input this entry belongs to, taking a reference. */
        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
1040
/* Called from IO thread context.
 *
 * Counterpart of fill_mix_info(): advances every input by result->length
 * bytes, forwards each input's own data (or silence) to any direct outputs
 * on the monitor source, releases the references fill_mix_info() acquired,
 * and finally posts the mixed result to the monitor source. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            /* p persists across outer-loop iterations so that when the
             * input order is unchanged each lookup hits on the first try. */
            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                if (m && m->chunk.memblock) {
                    /* Post a volume-adjusted copy of this input's own data
                     * to its direct outputs. */
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    /* The input contributed no (non-silent) data: post
                     * silence instead. */
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release the memblock and sink-input references that
         * fill_mix_info() took for this entry. */
        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
1137
1138 /* Called from IO thread context */
1139 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1140 pa_mix_info info[MAX_MIX_CHANNELS];
1141 unsigned n;
1142 size_t block_size_max;
1143
1144 pa_sink_assert_ref(s);
1145 pa_sink_assert_io_context(s);
1146 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1147 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1148 pa_assert(result);
1149
1150 pa_assert(!s->thread_info.rewind_requested);
1151 pa_assert(s->thread_info.rewind_nbytes == 0);
1152
1153 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1154 result->memblock = pa_memblock_ref(s->silence.memblock);
1155 result->index = s->silence.index;
1156 result->length = PA_MIN(s->silence.length, length);
1157 return;
1158 }
1159
1160 pa_sink_ref(s);
1161
1162 if (length <= 0)
1163 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1164
1165 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1166 if (length > block_size_max)
1167 length = pa_frame_align(block_size_max, &s->sample_spec);
1168
1169 pa_assert(length > 0);
1170
1171 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1172
1173 if (n == 0) {
1174
1175 *result = s->silence;
1176 pa_memblock_ref(result->memblock);
1177
1178 if (result->length > length)
1179 result->length = length;
1180
1181 } else if (n == 1) {
1182 pa_cvolume volume;
1183
1184 *result = info[0].chunk;
1185 pa_memblock_ref(result->memblock);
1186
1187 if (result->length > length)
1188 result->length = length;
1189
1190 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1191
1192 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1193 pa_memblock_unref(result->memblock);
1194 pa_silence_memchunk_get(&s->core->silence_cache,
1195 s->core->mempool,
1196 result,
1197 &s->sample_spec,
1198 result->length);
1199 } else if (!pa_cvolume_is_norm(&volume)) {
1200 pa_memchunk_make_writable(result, 0);
1201 pa_volume_memchunk(result, &s->sample_spec, &volume);
1202 }
1203 } else {
1204 void *ptr;
1205 result->memblock = pa_memblock_new(s->core->mempool, length);
1206
1207 ptr = pa_memblock_acquire(result->memblock);
1208 result->length = pa_mix(info, n,
1209 ptr, length,
1210 &s->sample_spec,
1211 &s->thread_info.soft_volume,
1212 s->thread_info.soft_muted);
1213 pa_memblock_release(result->memblock);
1214
1215 result->index = 0;
1216 }
1217
1218 inputs_drop(s, info, n, result);
1219
1220 pa_sink_unref(s);
1221 }
1222
/* Called from IO thread context.
 *
 * Like pa_sink_render(), but renders into a caller-provided chunk instead
 * of allocating one. May shorten target->length if less data was
 * available. */
void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t length, block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    /* A suspended sink produces only silence. */
    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
        return;
    }

    pa_sink_ref(s);

    /* Never process more than fits into a single memblock. */
    length = target->length;
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* No input had data: fill the target with silence. */
        if (target->length > length)
            target->length = length;

        pa_silence_memchunk(target, &s->sample_spec);
    } else if (n == 1) {
        pa_cvolume volume;

        /* Exactly one input: copy its data, applying volume if needed. */
        if (target->length > length)
            target->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
            pa_silence_memchunk(target, &s->sample_spec);
        else {
            pa_memchunk vchunk;

            vchunk = info[0].chunk;
            pa_memblock_ref(vchunk.memblock);

            if (vchunk.length > length)
                vchunk.length = length;

            if (!pa_cvolume_is_norm(&volume)) {
                pa_memchunk_make_writable(&vchunk, 0);
                pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
            }

            pa_memchunk_memcpy(target, &vchunk);
            pa_memblock_unref(vchunk.memblock);
        }

    } else {
        void *ptr;

        /* Several inputs: mix straight into the target block. */
        ptr = pa_memblock_acquire(target->memblock);

        target->length = pa_mix(info, n,
                                (uint8_t*) ptr + target->index, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(target->memblock);
    }

    inputs_drop(s, info, n, target);

    pa_sink_unref(s);
}
1307
1308 /* Called from IO thread context */
1309 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1310 pa_memchunk chunk;
1311 size_t l, d;
1312
1313 pa_sink_assert_ref(s);
1314 pa_sink_assert_io_context(s);
1315 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1316 pa_assert(target);
1317 pa_assert(target->memblock);
1318 pa_assert(target->length > 0);
1319 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1320
1321 pa_assert(!s->thread_info.rewind_requested);
1322 pa_assert(s->thread_info.rewind_nbytes == 0);
1323
1324 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1325 pa_silence_memchunk(target, &s->sample_spec);
1326 return;
1327 }
1328
1329 pa_sink_ref(s);
1330
1331 l = target->length;
1332 d = 0;
1333 while (l > 0) {
1334 chunk = *target;
1335 chunk.index += d;
1336 chunk.length -= d;
1337
1338 pa_sink_render_into(s, &chunk);
1339
1340 d += chunk.length;
1341 l -= chunk.length;
1342 }
1343
1344 pa_sink_unref(s);
1345 }
1346
1347 /* Called from IO thread context */
1348 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1349 pa_sink_assert_ref(s);
1350 pa_sink_assert_io_context(s);
1351 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1352 pa_assert(length > 0);
1353 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1354 pa_assert(result);
1355
1356 pa_assert(!s->thread_info.rewind_requested);
1357 pa_assert(s->thread_info.rewind_nbytes == 0);
1358
1359 pa_sink_ref(s);
1360
1361 pa_sink_render(s, length, result);
1362
1363 if (result->length < length) {
1364 pa_memchunk chunk;
1365
1366 pa_memchunk_make_writable(result, length);
1367
1368 chunk.memblock = result->memblock;
1369 chunk.index = result->index + result->length;
1370 chunk.length = length - result->length;
1371
1372 pa_sink_render_into_full(s, &chunk);
1373
1374 result->length = length;
1375 }
1376
1377 pa_sink_unref(s);
1378 }
1379
/* Called from main thread.
 *
 * Tries to switch the sink to a new sample rate. Unless "passthrough" is
 * set, the result is constrained to the sink's default or alternate rate
 * (whichever family — 4000- or 11025-multiple — matches the request).
 * Returns TRUE if the rate was actually changed, FALSE otherwise (rate
 * unsupported, sink busy, no update_rate() callback, etc.). */
pa_bool_t pa_sink_update_rate(pa_sink *s, uint32_t rate, pa_bool_t passthrough)
{
    if (s->update_rate) {
        uint32_t desired_rate = rate;
        uint32_t default_rate = s->default_sample_rate;
        uint32_t alternate_rate = s->alternate_sample_rate;
        uint32_t idx;
        pa_sink_input *i;
        pa_bool_t use_alternate = FALSE;

        if (PA_UNLIKELY(default_rate == alternate_rate)) {
            pa_log_warn("Default and alternate sample rates are the same.");
            return FALSE;
        }

        /* Never change the rate under a running stream. */
        if (PA_SINK_IS_RUNNING(s->state)) {
            pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
                        s->sample_spec.rate);
            return FALSE;
        }

        if (s->monitor_source) {
            if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == TRUE) {
                pa_log_info("Cannot update rate, monitor source is RUNNING");
                return FALSE;
            }
        }

        /* Reject rates outside the supported range. */
        if (PA_UNLIKELY (desired_rate < 8000 ||
                         desired_rate > PA_RATE_MAX))
            return FALSE;

        if (!passthrough) {
            /* Neither configured rate may be a common multiple of both
             * rate families. */
            pa_assert(default_rate % 4000 || default_rate % 11025);
            pa_assert(alternate_rate % 4000 || alternate_rate % 11025);

            if (default_rate % 4000) {
                /* default is a 11025 multiple */
                if ((alternate_rate % 4000 == 0) && (desired_rate % 4000 == 0))
                    use_alternate=TRUE;
            } else {
                /* default is 4000 multiple */
                if ((alternate_rate % 11025 == 0) && (desired_rate % 11025 == 0))
                    use_alternate=TRUE;
            }

            if (use_alternate)
                desired_rate = alternate_rate;
            else
                desired_rate = default_rate;
        } else {
            desired_rate = rate; /* use stream sampling rate, discard default/alternate settings */
        }

        if (desired_rate == s->sample_spec.rate)
            return FALSE;

        /* With other (non-passthrough) streams attached we must not
         * change the rate under them. */
        if (!passthrough && pa_sink_used_by(s) > 0)
            return FALSE;

        pa_log_debug("Suspending sink %s due to changing the sample rate.", s->name);
        pa_sink_suspend(s, TRUE, PA_SUSPEND_IDLE); /* needed before rate update, will be resumed automatically */

        if (s->update_rate(s, desired_rate) == TRUE) {
            /* update monitor source as well */
            if (s->monitor_source && !passthrough)
                pa_source_update_rate(s->monitor_source, desired_rate, FALSE);
            pa_log_info("Changed sampling rate successfully");

            /* Corked inputs need their resamplers refreshed for the new
             * sink rate. */
            PA_IDXSET_FOREACH(i, s->inputs, idx) {
                if (i->state == PA_SINK_INPUT_CORKED)
                    pa_sink_input_update_rate(i);
            }

            return TRUE;
        }
    }
    return FALSE;
}
1460
/* Called from main thread.
 *
 * Returns the sink's current playback latency in usec (0 when suspended
 * or when the sink does not report latency), with the configured latency
 * offset applied. */
pa_usec_t pa_sink_get_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SINK_LATENCY))
        return 0;

    /* Synchronously ask the IO thread for the current latency. */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);

    /* usec is unsigned, so check that the offset can be added to usec without
     * underflowing. */
    if (-s->latency_offset <= (int64_t) usec)
        usec += s->latency_offset;
    else
        usec = 0;

    return usec;
}
1488
/* Called from IO thread.
 *
 * IO-thread-side variant of pa_sink_get_latency(): queries the latency by
 * invoking the message handler directly instead of sending a message. */
pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
    pa_usec_t usec = 0;
    pa_msgobject *o;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SINK_LATENCY))
        return 0;

    o = PA_MSGOBJECT(s);

    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */

    /* NOTE(review): the return type is unsigned, so this -1 wraps to the
     * maximum pa_usec_t value — callers presumably treat that as an error
     * sentinel; verify against call sites. */
    if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
        return -1;

    /* usec is unsigned, so check that the offset can be added to usec without
     * underflowing. */
    if (-s->thread_info.latency_offset <= (int64_t) usec)
        usec += s->thread_info.latency_offset;
    else
        usec = 0;

    return usec;
}
1522
1523 /* Called from the main thread (and also from the IO thread while the main
1524 * thread is waiting).
1525 *
1526 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1527 * set. Instead, flat volume mode is detected by checking whether the root sink
1528 * has the flag set. */
1529 pa_bool_t pa_sink_flat_volume_enabled(pa_sink *s) {
1530 pa_sink_assert_ref(s);
1531
1532 s = pa_sink_get_master(s);
1533
1534 if (PA_LIKELY(s))
1535 return (s->flags & PA_SINK_FLAT_VOLUME);
1536 else
1537 return FALSE;
1538 }
1539
1540 /* Called from the main thread (and also from the IO thread while the main
1541 * thread is waiting). */
1542 pa_sink *pa_sink_get_master(pa_sink *s) {
1543 pa_sink_assert_ref(s);
1544
1545 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1546 if (PA_UNLIKELY(!s->input_to_master))
1547 return NULL;
1548
1549 s = s->input_to_master->sink;
1550 }
1551
1552 return s;
1553 }
1554
1555 /* Called from main context */
1556 pa_bool_t pa_sink_is_passthrough(pa_sink *s) {
1557 pa_sink_input *alt_i;
1558 uint32_t idx;
1559
1560 pa_sink_assert_ref(s);
1561
1562 /* one and only one PASSTHROUGH input can possibly be connected */
1563 if (pa_idxset_size(s->inputs) == 1) {
1564 alt_i = pa_idxset_first(s->inputs, &idx);
1565
1566 if (pa_sink_input_is_passthrough(alt_i))
1567 return TRUE;
1568 }
1569
1570 return FALSE;
1571 }
1572
1573 /* Called from main context */
1574 void pa_sink_enter_passthrough(pa_sink *s) {
1575 pa_cvolume volume;
1576
1577 /* disable the monitor in passthrough mode */
1578 if (s->monitor_source) {
1579 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1580 pa_source_suspend(s->monitor_source, TRUE, PA_SUSPEND_PASSTHROUGH);
1581 }
1582
1583 /* set the volume to NORM */
1584 s->saved_volume = *pa_sink_get_volume(s, TRUE);
1585 s->saved_save_volume = s->save_volume;
1586
1587 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1588 pa_sink_set_volume(s, &volume, TRUE, FALSE);
1589 }
1590
1591 /* Called from main context */
1592 void pa_sink_leave_passthrough(pa_sink *s) {
1593 /* Unsuspend monitor */
1594 if (s->monitor_source) {
1595 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1596 pa_source_suspend(s->monitor_source, FALSE, PA_SUSPEND_PASSTHROUGH);
1597 }
1598
1599 /* Restore sink volume to what it was before we entered passthrough mode */
1600 pa_sink_set_volume(s, &s->saved_volume, TRUE, s->saved_save_volume);
1601
1602 pa_cvolume_init(&s->saved_volume);
1603 s->saved_save_volume = FALSE;
1604 }
1605
/* Called from main context.
 *
 * Recomputes i->reference_ratio from the input's volume and its sink's
 * reference volume. Per-channel ratios are skipped (left untouched) when
 * the sink channel is muted or when the existing ratio would already
 * reproduce the input volume exactly, to avoid losing accuracy to
 * round-tripping through divide/multiply. */
static void compute_reference_ratio(pa_sink_input *i) {
    unsigned c = 0;
    pa_cvolume remapped;

    pa_assert(i);
    pa_assert(pa_sink_flat_volume_enabled(i->sink));

    /*
     * Calculates the reference ratio from the sink's reference
     * volume. This basically calculates:
     *
     * i->reference_ratio = i->volume / i->sink->reference_volume
     */

    remapped = i->sink->reference_volume;
    pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);

    i->reference_ratio.channels = i->sample_spec.channels;

    for (c = 0; c < i->sample_spec.channels; c++) {

        /* We don't update when the sink volume is 0 anyway */
        if (remapped.values[c] <= PA_VOLUME_MUTED)
            continue;

        /* Don't update the reference ratio unless necessary */
        if (pa_sw_volume_multiply(
                i->reference_ratio.values[c],
                remapped.values[c]) == i->volume.values[c])
            continue;

        i->reference_ratio.values[c] = pa_sw_volume_divide(
                i->volume.values[c],
                remapped.values[c]);
    }
}
1643
1644 /* Called from main context. Only called for the root sink in volume sharing
1645 * cases, except for internal recursive calls. */
1646 static void compute_reference_ratios(pa_sink *s) {
1647 uint32_t idx;
1648 pa_sink_input *i;
1649
1650 pa_sink_assert_ref(s);
1651 pa_assert_ctl_context();
1652 pa_assert(PA_SINK_IS_LINKED(s->state));
1653 pa_assert(pa_sink_flat_volume_enabled(s));
1654
1655 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1656 compute_reference_ratio(i);
1657
1658 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1659 compute_reference_ratios(i->origin_sink);
1660 }
1661 }
1662
/* Called from main context. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Recomputes, for every input of s, the real ratio (input volume relative
 * to the sink's real volume) and from it the input's soft volume. */
static void compute_real_ratios(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin sink uses volume sharing, so this input's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result i->soft_volume must equal i->volume_factor. */
            pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
            i->soft_volume = i->volume_factor;

            compute_real_ratios(i->origin_sink);

            continue;
        }

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->real_ratio.channels = i->sample_spec.channels;
        i->soft_volume.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave i->real_ratio untouched */
                i->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    remapped.values[c]) != i->volume.values[c])

                i->real_ratio.values[c] = pa_sw_volume_divide(
                        i->volume.values[c],
                        remapped.values[c]);

            i->soft_volume.values[c] = pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    i->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
1729
/* Remaps *v from channel map "from" to channel map "to" in place, trying
 * to minimize the impact of the remapping (see the explanation below).
 * "template" is an existing volume already in the "to" map that is used
 * as a candidate result. Returns v. */
static pa_cvolume *cvolume_remap_minimal_impact(
        pa_cvolume *v,
        const pa_cvolume *template,
        const pa_channel_map *from,
        const pa_channel_map *to) {

    pa_cvolume t;

    pa_assert(v);
    pa_assert(template);
    pa_assert(from);
    pa_assert(to);
    pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
    pa_assert(pa_cvolume_compatible_with_channel_map(template, to));

    /* Much like pa_cvolume_remap(), but tries to minimize impact when
     * mapping from sink input to sink volumes:
     *
     * If template is a possible remapping from v it is used instead
     * of remapping anew.
     *
     * If the channel maps don't match we set an all-channel volume on
     * the sink to ensure that changing a volume on one stream has no
     * effect that cannot be compensated for in another stream that
     * does not have the same channel map as the sink. */

    /* Identical maps: nothing to do. */
    if (pa_channel_map_equal(from, to))
        return v;

    /* If remapping the template back to "from" reproduces v, adopt the
     * template as-is. */
    t = *template;
    if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
        *v = *template;
        return v;
    }

    /* Otherwise fall back to a flat all-channel volume at v's maximum. */
    pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
    return v;
}
1768
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Merges the volumes of all inputs of s (recursing through volume-sharing
 * filter sinks) into *max_volume, which must be initialized by the caller
 * (typically to mute) and is expressed in "channel_map". */
static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(max_volume);
    pa_assert(channel_map);
    pa_assert(pa_sink_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume remapped;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            get_maximum_input_volume(i->origin_sink, max_volume, channel_map);

            /* Ignore this input. The origin sink uses volume sharing, so this
             * input's volume will be set to be equal to the root sink's real
             * volume. Obviously this input's current volume must not then
             * affect what the root sink's real volume will be. */
            continue;
        }

        /* Remap with minimal impact, then take the per-channel maximum. */
        remapped = i->volume;
        cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
        pa_cvolume_merge(max_volume, max_volume, &remapped);
    }
}
1798
1799 /* Called from main thread. Only called for the root sink in volume sharing
1800 * cases, except for internal recursive calls. */
1801 static pa_bool_t has_inputs(pa_sink *s) {
1802 pa_sink_input *i;
1803 uint32_t idx;
1804
1805 pa_sink_assert_ref(s);
1806
1807 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1808 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1809 return TRUE;
1810 }
1811
1812 return FALSE;
1813 }
1814
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Sets s->real_volume to *new_volume (given in "channel_map") and
 * propagates it down through volume-sharing filter sinks, updating their
 * connecting inputs' volumes along the way. */
static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);

    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_sink_flat_volume_enabled(s)) {
                pa_cvolume old_volume = i->volume;

                /* Follow the root sink's real volume. */
                i->volume = *new_volume;
                pa_cvolume_remap(&i->volume, channel_map, &i->channel_map);
                compute_reference_ratio(i);

                /* The volume changed, let's tell people so */
                if (!pa_cvolume_equal(&old_volume, &i->volume)) {
                    if (i->volume_changed)
                        i->volume_changed(i);

                    pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
                }
            }

            /* Recurse into the filter sink behind this input. */
            update_real_volume(i->origin_sink, new_volume, channel_map);
        }
    }
}
1851
/* Called from main thread. Only called for the root sink in shared volume
 * cases.
 *
 * Derives s->real_volume from the volumes of all connected streams and
 * refreshes the inputs' real ratios/soft volumes accordingly. */
static void compute_real_volume(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (!has_inputs(s)) {
        /* In the special case that we have no sink inputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);
        return;
    }

    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all inputs
     * connected to this sink */
    get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);

    /* Then, let's update the real ratios/soft volumes of all inputs
     * connected to this sink */
    compute_real_ratios(s);
}
1882
/* Called from main thread. Only called for the root sink in shared volume
 * cases, except for internal recursive calls.
 *
 * Pushes the sink's reference volume down to its inputs by recomputing
 * each input's volume as reference_volume * reference_ratio. */
static void propagate_reference_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));

    /* This is called whenever the sink volume changes that is not
     * caused by a sink input volume change. We need to fix up the
     * sink input volumes accordingly */

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume old_volume;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            propagate_reference_volume(i->origin_sink);

            /* Since the origin sink uses volume sharing, this input's volume
             * needs to be updated to match the root sink's real volume, but
             * that will be done later in update_shared_real_volume(). */
            continue;
        }

        old_volume = i->volume;

        /* This basically calculates:
         *
         * i->volume := s->reference_volume * i->reference_ratio */

        i->volume = s->reference_volume;
        pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
        pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);

        /* The volume changed, let's tell people so */
        if (!pa_cvolume_equal(&old_volume, &i->volume)) {

            if (i->volume_changed)
                i->volume_changed(i);

            pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
        }
    }
}
1930
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed.
 *
 * Sets s->reference_volume to *v (given in "channel_map"), posts a change
 * event if it differs from the previous value, and recurses into
 * volume-sharing filter sinks. "save" marks the volume for persistence. */
static pa_bool_t update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
    pa_cvolume volume;
    pa_bool_t reference_volume_changed;
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(v);
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));

    volume = *v;
    pa_cvolume_remap(&volume, channel_map, &s->channel_map);

    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    s->reference_volume = volume;

    /* Keep the save flag if the volume didn't change; otherwise take the
     * caller's wish. */
    s->save_volume = (!reference_volume_changed && s->save_volume) || save;

    if (reference_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    else if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        /* If the root sink's volume doesn't change, then there can't be any
         * changes in the other sinks in the sink tree either.
         *
         * It's probably theoretically possible that even if the root sink's
         * volume changes slightly, some filter sink doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root sink volume to the sinks connected to the
         * intermediate sink that didn't change its volume. This theoretical
         * possibility is the reason why we have that !(s->flags &
         * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we returned here FALSE always if
         * reference_volume_changed is FALSE. */
        return FALSE;

    /* Recurse into volume-sharing filter sinks hanging off this one. */
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
            update_reference_volume(i->origin_sink, v, channel_map, FALSE);
    }

    return TRUE;
}
1978
/* Called from main thread */

/* Sets the sink's reference volume, or, if 'volume' is NULL, re-derives the
 * reference volume from the connected streams' volumes (NULL is only valid
 * in flat volume mode, see the asserts below).
 *
 * In volume sharing setups the volume is always applied to the root sink of
 * the sharing tree and propagated from there.
 *
 * send_msg: if TRUE, the IO thread is synchronously told about the changed
 *           soft/real volume (PA_SINK_MESSAGE_SET_SHARED_VOLUME).
 * save:     if TRUE, the new volume is marked for saving to persistent
 *           storage. */
void pa_sink_set_volume(
        pa_sink *s,
        const pa_cvolume *volume,
        pa_bool_t send_msg,
        pa_bool_t save) {

    pa_cvolume new_reference_volume;
    pa_sink *root_sink;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_sink_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
     * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
    if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
        pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
        return;
    }

    /* In case of volume sharing, the volume is set for the root sink first,
     * from which it's then propagated to the sharing sinks. */
    root_sink = pa_sink_get_master(s);

    if (PA_UNLIKELY(!root_sink))
        return;

    /* As a special exception we accept mono volumes on all sinks --
     * even on those with more complex channel maps */

    if (volume) {
        if (pa_cvolume_compatible(volume, &s->sample_spec))
            new_reference_volume = *volume;
        else {
            /* Mono volume: scale the current reference volume so that its
             * maximum channel matches the requested level, keeping the
             * channel balance intact. */
            new_reference_volume = s->reference_volume;
            pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
        }

        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);

        if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
            if (pa_sink_flat_volume_enabled(root_sink)) {
                /* OK, propagate this volume change back to the inputs */
                propagate_reference_volume(root_sink);

                /* And now recalculate the real volume */
                compute_real_volume(root_sink);
            } else
                update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
        }

    } else {
        /* If volume is NULL we synchronize the sink's real and
         * reference volumes with the stream volumes. */

        pa_assert(pa_sink_flat_volume_enabled(root_sink));

        /* Ok, let's determine the new real volume */
        compute_real_volume(root_sink);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
        /* If the sink and its root don't have the same number of channels, we need to remap */
        if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
            pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
        update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);

        /* Now that the reference volume is updated, we can update the streams'
         * reference ratios. */
        compute_reference_ratios(root_sink);
    }

    if (root_sink->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_sink->soft_volume */

        pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
        if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
            root_sink->set_volume(root_sink);
        /* In the deferred volume case set_volume() is instead invoked from
         * the IO thread while processing SET_VOLUME_SYNCED (triggered via
         * the SET_SHARED_VOLUME message sent below). */

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the real volume */
        root_sink->soft_volume = root_sink->real_volume;

    /* This tells the sink that soft volume and/or real volume changed */
    if (send_msg)
        pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
}
2073
/* Called from the io thread if sync volume is used, otherwise from the main thread.
 * Only to be called by sink implementor */

/* Overrides the sink's soft volume (the attenuation applied in software
 * during mixing). Passing NULL resets the soft volume to norm on all
 * channels. Must not be called on sinks that share their volume with a
 * master sink. */
void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {

    pa_sink_assert_ref(s);
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    /* The calling context depends on the deferred volume flag, see the
     * comment above the function. */
    if (s->flags & PA_SINK_DEFERRED_VOLUME)
        pa_sink_assert_io_context(s);
    else
        pa_assert_ctl_context();

    if (!volume)
        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    else
        s->soft_volume = *volume;

    /* If linked and called from the main thread, hand the new value over to
     * the IO thread synchronously; otherwise we may update the thread_info
     * mirror directly. */
    if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
    else
        s->thread_info.soft_volume = s->soft_volume;
}
2096
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */

/* Reacts to an externally caused change of the hardware (real) volume:
 * 'old_real_volume' is the value before the change, s->real_volume already
 * holds the new value. The new real volume becomes the reference volume and,
 * in flat volume mode, the stream volumes are rebuilt from the (unchanged)
 * per-stream real ratios. Recurses into volume-sharing filter sinks. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))
            return;

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
    }

    if (pa_sink_flat_volume_enabled(s)) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume old_volume = i->volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            i->reference_ratio = i->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            i->volume = s->reference_volume;
            pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &i->volume)) {

                if (i->volume_changed)
                    i->volume_changed(i);

                pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
            }

            /* Recurse into filter sinks sharing our volume so that their
             * stream volumes get rebuilt as well. */
            if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
                propagate_real_volume(i->origin_sink, old_real_volume);
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        s->save_volume = TRUE;
}
2162
/* Called from io thread */

/* Asks the main thread to refresh the sink's volume and mute state from the
 * driver (handled as PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE in
 * pa_sink_process_msg()). Fire-and-forget: posted without waiting for a
 * reply. */
void pa_sink_update_volume_and_mute(pa_sink *s) {
    pa_assert(s);
    pa_sink_assert_io_context(s);

    pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
}
2170
/* Called from main thread */

/* Returns the sink's reference volume. If 'force_refresh' is TRUE (or the
 * sink has refresh_volume set), the real volume is first re-read from the
 * driver and any externally made change is propagated back into the
 * reference volume and the stream volumes. */
const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_real_volume;

        pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

        old_real_volume = s->real_volume;

        /* Without deferred volume, query the driver from this thread; with
         * deferred volume the GET_VOLUME message below makes the IO thread
         * do the query. */
        if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
            s->get_volume(s);

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        update_real_volume(s, &s->real_volume, &s->channel_map);
        propagate_real_volume(s, &old_real_volume);
    }

    return &s->reference_volume;
}
2195
/* Called from main thread. In volume sharing cases, only the root sink may
 * call this. */

/* To be used by the sink implementor to report an externally triggered
 * hardware volume change. Stores the new real volume and propagates it to
 * the reference volume and the connected streams. */
void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
    pa_cvolume old_real_volume;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    /* The sink implementor may call this if the volume changed to make sure everyone is notified */

    old_real_volume = s->real_volume;
    update_real_volume(s, new_real_volume, &s->channel_map);
    propagate_real_volume(s, &old_real_volume);
}
2212
/* Called from main thread */

/* Sets the sink's mute state. 'save' marks the new state for saving to
 * persistent storage; when the state is unchanged, the previous save flag is
 * kept unless 'save' forces it. The IO thread is always informed
 * synchronously, subscribers only if the state actually changed. */
void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
    pa_bool_t old_muted;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    old_muted = s->muted;
    s->muted = mute;
    s->save_muted = (old_muted == s->muted && s->save_muted) || save;

    /* With deferred volume, the driver's set_mute() is invoked by the IO
     * thread while handling the SET_MUTE message sent below; otherwise call
     * it directly from here. */
    if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute)
        s->set_mute(s);

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);

    if (old_muted != s->muted)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
2233
/* Called from main thread */

/* Returns the sink's mute state. If 'force_refresh' is TRUE (or the sink has
 * refresh_muted set), the state is first re-read from the driver; an
 * externally changed state is marked for saving, subscribers are notified
 * and the soft mute state is re-synchronized. */
pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_muted || force_refresh) {
        pa_bool_t old_muted = s->muted;

        /* Without deferred volume, query the driver from this thread; with
         * deferred volume the GET_MUTE message below makes the IO thread do
         * the query. */
        if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_mute)
            s->get_mute(s);

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);

        if (old_muted != s->muted) {
            s->save_muted = TRUE;

            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

            /* Make sure the soft mute status stays in sync */
            pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
        }
    }

    return s->muted;
}
2261
2262 /* Called from main thread */
2263 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
2264 pa_sink_assert_ref(s);
2265 pa_assert_ctl_context();
2266 pa_assert(PA_SINK_IS_LINKED(s->state));
2267
2268 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2269
2270 if (s->muted == new_muted)
2271 return;
2272
2273 s->muted = new_muted;
2274 s->save_muted = TRUE;
2275
2276 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2277 }
2278
2279 /* Called from main thread */
2280 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2281 pa_sink_assert_ref(s);
2282 pa_assert_ctl_context();
2283
2284 if (p)
2285 pa_proplist_update(s->proplist, mode, p);
2286
2287 if (PA_SINK_IS_LINKED(s->state)) {
2288 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2289 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2290 }
2291
2292 return TRUE;
2293 }
2294
/* Called from main thread */
/* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */

/* Sets (or, with NULL, removes) the sink's human-readable description
 * property and keeps the monitor source's description in sync. A no-op when
 * the description doesn't actually change. */
void pa_sink_set_description(pa_sink *s, const char *description) {
    const char *old;
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Removing a description that isn't set? Nothing to do. */
    if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
        return;

    old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    /* Unchanged? Nothing to do either. */
    if (old && description && pa_streq(old, description))
        return;

    if (description)
        pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
    else
        pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);

    /* Derive the monitor source's description from ours (fall back to the
     * sink name when the description was removed). */
    if (s->monitor_source) {
        char *n;

        n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
        pa_source_set_description(s->monitor_source, n);
        pa_xfree(n);
    }

    if (PA_SINK_IS_LINKED(s->state)) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
    }
}
2328
2329 /* Called from main thread */
2330 unsigned pa_sink_linked_by(pa_sink *s) {
2331 unsigned ret;
2332
2333 pa_sink_assert_ref(s);
2334 pa_assert_ctl_context();
2335 pa_assert(PA_SINK_IS_LINKED(s->state));
2336
2337 ret = pa_idxset_size(s->inputs);
2338
2339 /* We add in the number of streams connected to us here. Please
2340 * note the asymmetry to pa_sink_used_by()! */
2341
2342 if (s->monitor_source)
2343 ret += pa_source_linked_by(s->monitor_source);
2344
2345 return ret;
2346 }
2347
2348 /* Called from main thread */
2349 unsigned pa_sink_used_by(pa_sink *s) {
2350 unsigned ret;
2351
2352 pa_sink_assert_ref(s);
2353 pa_assert_ctl_context();
2354 pa_assert(PA_SINK_IS_LINKED(s->state));
2355
2356 ret = pa_idxset_size(s->inputs);
2357 pa_assert(ret >= s->n_corked);
2358
2359 /* Streams connected to our monitor source do not matter for
2360 * pa_sink_used_by()!.*/
2361
2362 return ret - s->n_corked;
2363 }
2364
2365 /* Called from main thread */
2366 unsigned pa_sink_check_suspend(pa_sink *s) {
2367 unsigned ret;
2368 pa_sink_input *i;
2369 uint32_t idx;
2370
2371 pa_sink_assert_ref(s);
2372 pa_assert_ctl_context();
2373
2374 if (!PA_SINK_IS_LINKED(s->state))
2375 return 0;
2376
2377 ret = 0;
2378
2379 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2380 pa_sink_input_state_t st;
2381
2382 st = pa_sink_input_get_state(i);
2383
2384 /* We do not assert here. It is perfectly valid for a sink input to
2385 * be in the INIT state (i.e. created, marked done but not yet put)
2386 * and we should not care if it's unlinked as it won't contribute
2387 * towards our busy status.
2388 */
2389 if (!PA_SINK_INPUT_IS_LINKED(st))
2390 continue;
2391
2392 if (st == PA_SINK_INPUT_CORKED)
2393 continue;
2394
2395 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2396 continue;
2397
2398 ret ++;
2399 }
2400
2401 if (s->monitor_source)
2402 ret += pa_source_check_suspend(s->monitor_source);
2403
2404 return ret;
2405 }
2406
2407 /* Called from the IO thread */
2408 static void sync_input_volumes_within_thread(pa_sink *s) {
2409 pa_sink_input *i;
2410 void *state = NULL;
2411
2412 pa_sink_assert_ref(s);
2413 pa_sink_assert_io_context(s);
2414
2415 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2416 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2417 continue;
2418
2419 i->thread_info.soft_volume = i->soft_volume;
2420 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
2421 }
2422 }
2423
/* Called from the IO thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */

/* Applies the pending volume on this sink synchronously (via the
 * SET_VOLUME_SYNCED message) and then recurses into all filter sinks that
 * share their volume with this sink. */
static void set_shared_volume_within_thread(pa_sink *s) {
    pa_sink_input *i = NULL;
    void *state = NULL;

    pa_sink_assert_ref(s);

    PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
            set_shared_volume_within_thread(i->origin_sink);
    }
}
2439
/* Called from IO thread, except when it is not */

/* Default message handler for sinks: implements the core behaviour for the
 * PA_SINK_MESSAGE_* codes (attaching/detaching inputs, stream moves,
 * volume/mute synchronization between main and IO threads, state changes,
 * latency parameters). Sink implementations usually chain up to this from
 * their own process_msg(). Returns 0 on success, -1 for message codes not
 * handled here. */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Mirror the sync chain (if any) into thread_info */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be fixed up and
             * then configured on the sink. If this causes the sink latency to
             * go down, the sink implementor is responsible for doing a rewind
             * in the update_requested_latency() callback to ensure that the
             * sink buffer doesn't contain more data than what the new latency
             * allows.
             *
             * XXX: Does it really make sense to push this responsibility to
             * the sink implementors? Wouldn't it be better to do it once in
             * the core than many times in the modules? */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming.
             *
             * XXX: Does it really make sense to push this functionality to
             * the sink implementors? Wouldn't it be better to do it once in
             * the core than many times in the modules? */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unlink this input from the thread-side sync chain */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* The old sink probably has some audio from this
                 * stream in its buffer. We want to "take it back" as
                 * much as possible and play it to the new sink. We
                 * don't know at this point how much the old sink can
                 * rewind. We have to pick something, and that
                 * something is the full latency of the old sink here.
                 * So we rewind the stream buffer by the sink latency
                 * amount, which may be more than what we should
                 * rewind. This can result in a chunk of audio being
                 * played both to the old sink and the new sink.
                 *
                 * FIXME: Fix this code so that we don't have to make
                 * guesses about how much the sink will actually be
                 * able to rewind. If someone comes up with a solution
                 * for this, something to note is that the part of the
                 * latency that the old sink couldn't rewind should
                 * ideally be compensated after the stream has moved
                 * to the new sink by adding silence. The new sink
                 * most likely can't start playing the moved stream
                 * immediately, and that gap should be removed from
                 * the "compensation silence" (at least at the time of
                 * writing this, the move finish code will actually
                 * already take care of dropping the new sink's
                 * unrewindable latency, so taking into account the
                 * unrewindable latency of the old sink is the only
                 * problem).
                 *
                 * The render_memblockq contents are discarded,
                 * because when the sink changes, the format of the
                 * audio stored in the render_memblockq may change
                 * too, making the stored audio invalid. FIXME:
                 * However, the read and write indices are moved back
                 * the same amount, so if they are not the same now,
                 * they won't be the same after the rewind either. If
                 * the write index of the render_memblockq is ahead of
                 * the read index, then the render_memblockq will feed
                 * the new sink some silence first, which it shouldn't
                 * do. The write index should be flushed to be the
                 * same as the read index. */

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* In the ideal case the new sink would start playing
                 * the stream immediately. That requires the sink to
                 * be able to rewind all of its latency, which usually
                 * isn't possible, so there will probably be some gap
                 * before the moved stream becomes audible. We then
                 * have two possibilities: 1) start playing the stream
                 * from where it is now, or 2) drop the unrewindable
                 * latency of the sink from the stream. With option 1
                 * we won't lose any audio but the stream will have a
                 * pause. With option 2 we may lose some audio but the
                 * stream time will be somewhat in sync with the wall
                 * clock. Lennart seems to have chosen option 2 (one
                 * of the reasons might have been that option 1 is
                 * actually much harder to implement), so we drop the
                 * latency of the new sink from the moved stream and
                 * hope that the sink will undo most of that in the
                 * rewind. */

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* Updating the requested sink latency has to be done
             * after the sink rewind request, not before, because
             * otherwise the sink may limit the rewind amount
             * needlessly. */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* In flat volume mode we need to update the volume as well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
            /* Apply the volume on the whole volume-sharing tree, starting
             * from its root sink */
            pa_sink *root_sink = pa_sink_get_master(s);

            if (PA_LIKELY(root_sink))
                set_shared_volume_within_thread(root_sink);

            return 0;
        }

        case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:

            if (s->flags & PA_SINK_DEFERRED_VOLUME) {
                s->set_volume(s);
                pa_sink_volume_change_push(s);
            }
            /* Fall through ... */

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                /* Rewind so the new volume also applies to already
                 * rendered audio */
                pa_sink_request_rewind(s, (size_t) -1);
            }

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:

            if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
                s->get_volume(s);
                pa_sink_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
            }

            /* In case sink implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
                s->set_mute(s);

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:

            if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
                s->get_mute(s);

            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* Whether this state change crosses the suspended/opened
             * boundary in either direction */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            /* When entering suspend, drop any pending rewind request */
            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                /* Let the inputs know about the new suspend state */
                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_PORT:

            pa_assert(userdata);
            if (s->set_port) {
                struct sink_message_set_port *msg_data = userdata;
                msg_data->ret = s->set_port(s, msg_data->port);
            }
            return 0;

        case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            /* Make sure we're not messing with main thread when no longer linked */
            if (!PA_SINK_IS_LINKED(s->state))
                return 0;

            pa_sink_get_volume(s, TRUE);
            pa_sink_get_mute(s, TRUE);
            return 0;

        case PA_SINK_MESSAGE_SET_LATENCY_OFFSET:
            s->thread_info.latency_offset = offset;
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            /* NOTE(review): not handled here — presumably handled by the
             * sink implementation's own process_msg(); falls through to the
             * -1 error return below. */
            ;
    }

    return -1;
}
2898
2899 /* Called from main thread */
2900 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2901 pa_sink *sink;
2902 uint32_t idx;
2903 int ret = 0;
2904
2905 pa_core_assert_ref(c);
2906 pa_assert_ctl_context();
2907 pa_assert(cause != 0);
2908
2909 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2910 int r;
2911
2912 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2913 ret = r;
2914 }
2915
2916 return ret;
2917 }
2918
/* Called from main thread */

/* Synchronously detaches all of the sink's streams in the IO thread (see
 * pa_sink_detach_within_thread(), invoked via the DETACH message). */
void pa_sink_detach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
2927
/* Called from main thread */

/* Synchronously reattaches all of the sink's streams in the IO thread (see
 * pa_sink_attach_within_thread(), invoked via the ATTACH message). */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2936
2937 /* Called from IO thread */
2938 void pa_sink_detach_within_thread(pa_sink *s) {
2939 pa_sink_input *i;
2940 void *state = NULL;
2941
2942 pa_sink_assert_ref(s);
2943 pa_sink_assert_io_context(s);
2944 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2945
2946 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2947 if (i->detach)
2948 i->detach(i);
2949
2950 if (s->monitor_source)
2951 pa_source_detach_within_thread(s->monitor_source);
2952 }
2953
2954 /* Called from IO thread */
2955 void pa_sink_attach_within_thread(pa_sink *s) {
2956 pa_sink_input *i;
2957 void *state = NULL;
2958
2959 pa_sink_assert_ref(s);
2960 pa_sink_assert_io_context(s);
2961 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2962
2963 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2964 if (i->attach)
2965 i->attach(i);
2966
2967 if (s->monitor_source)
2968 pa_source_attach_within_thread(s->monitor_source);
2969 }
2970
2971 /* Called from IO thread */
2972 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2973 pa_sink_assert_ref(s);
2974 pa_sink_assert_io_context(s);
2975 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2976
2977 if (nbytes == (size_t) -1)
2978 nbytes = s->thread_info.max_rewind;
2979
2980 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2981
2982 if (s->thread_info.rewind_requested &&
2983 nbytes <= s->thread_info.rewind_nbytes)
2984 return;
2985
2986 s->thread_info.rewind_nbytes = nbytes;
2987 s->thread_info.rewind_requested = TRUE;
2988
2989 if (s->request_rewind)
2990 s->request_rewind(s);
2991 }
2992
/* Called from IO thread */
/* Returns the effective latency requested of this sink: the smallest latency
 * any connected input or the monitor source asked for, clamped to the sink's
 * latency range. Returns (pa_usec_t) -1 if nobody requested anything. The
 * result is cached until pa_sink_invalidate_requested_latency() is called. */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Without dynamic latency the sink always runs at its fixed latency,
     * clamped into the configured range. */
    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    /* Use the cached value if it has not been invalidated. */
    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Take the minimum over all inputs that expressed a preference
     * ((pa_usec_t) -1 means "no preference")... */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* ...and fold in what the monitor source's outputs asked for. */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
3031
/* Called from main thread */
/* Main-thread accessor for the requested latency; forwards to the IO thread
 * via the message queue. Returns 0 while the sink is suspended (the value
 * would be meaningless then). */
pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);

    return usec;
}
3047
3048 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3049 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3050 pa_sink_input *i;
3051 void *state = NULL;
3052
3053 pa_sink_assert_ref(s);
3054 pa_sink_assert_io_context(s);
3055
3056 if (max_rewind == s->thread_info.max_rewind)
3057 return;
3058
3059 s->thread_info.max_rewind = max_rewind;
3060
3061 if (PA_SINK_IS_LINKED(s->thread_info.state))
3062 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3063 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3064
3065 if (s->monitor_source)
3066 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3067 }
3068
3069 /* Called from main thread */
3070 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3071 pa_sink_assert_ref(s);
3072 pa_assert_ctl_context();
3073
3074 if (PA_SINK_IS_LINKED(s->state))
3075 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3076 else
3077 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3078 }
3079
3080 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3081 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3082 void *state = NULL;
3083
3084 pa_sink_assert_ref(s);
3085 pa_sink_assert_io_context(s);
3086
3087 if (max_request == s->thread_info.max_request)
3088 return;
3089
3090 s->thread_info.max_request = max_request;
3091
3092 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3093 pa_sink_input *i;
3094
3095 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3096 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3097 }
3098 }
3099
3100 /* Called from main thread */
3101 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3102 pa_sink_assert_ref(s);
3103 pa_assert_ctl_context();
3104
3105 if (PA_SINK_IS_LINKED(s->state))
3106 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3107 else
3108 pa_sink_set_max_request_within_thread(s, max_request);
3109 }
3110
/* Called from IO thread */
/* Drops the cached requested-latency value so the next call to
 * pa_sink_get_requested_latency_within_thread() recomputes it, and notifies
 * the implementation and all inputs. If 'dynamic' is true the change is only
 * relevant for dynamic-latency sinks, so for fixed-latency sinks we return
 * without notifying anyone. */
void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Only dynamic-latency sinks cache a requested latency. */
    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = FALSE;
    else if (dynamic)
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        /* Give the implementation and every input a chance to react. */
        if (s->update_requested_latency)
            s->update_requested_latency(s);

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
3134
/* Called from main thread */
/* Configures the sink's latency range, clamped into the globally allowed
 * window [ABSOLUTE_MIN_LATENCY, ABSOLUTE_MAX_LATENCY]. A non-default range
 * is only meaningful for sinks with PA_SINK_DYNAMIC_LATENCY. */
void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* min_latency == 0: no limit
     * min_latency anything else: specified limit
     *
     * Similar for max_latency */

    if (min_latency < ABSOLUTE_MIN_LATENCY)
        min_latency = ABSOLUTE_MIN_LATENCY;

    /* pa_usec_t is unsigned, so "<= 0" can only match the "no limit" (0) case */
    if (max_latency <= 0 ||
        max_latency > ABSOLUTE_MAX_LATENCY)
        max_latency = ABSOLUTE_MAX_LATENCY;

    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    if (PA_SINK_IS_LINKED(s->state)) {
        pa_usec_t r[2];

        r[0] = min_latency;
        r[1] = max_latency;

        /* Once linked, the IO thread owns the authoritative copy. */
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
    } else
        pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
}
3169
3170 /* Called from main thread */
3171 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3172 pa_sink_assert_ref(s);
3173 pa_assert_ctl_context();
3174 pa_assert(min_latency);
3175 pa_assert(max_latency);
3176
3177 if (PA_SINK_IS_LINKED(s->state)) {
3178 pa_usec_t r[2] = { 0, 0 };
3179
3180 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3181
3182 *min_latency = r[0];
3183 *max_latency = r[1];
3184 } else {
3185 *min_latency = s->thread_info.min_latency;
3186 *max_latency = s->thread_info.max_latency;
3187 }
3188 }
3189
/* Called from IO thread */
/* Applies a new latency range in the IO thread, notifies all inputs,
 * invalidates the cached requested latency, and mirrors the range onto the
 * monitor source. No-op if the range is unchanged. */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* The main-thread wrapper already clamped into the absolute window. */
    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    if (s->thread_info.min_latency == min_latency &&
        s->thread_info.max_latency == max_latency)
        return;

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        /* The callback is optional per input. */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    /* The cached requested latency may now fall outside the new range. */
    pa_sink_invalidate_requested_latency(s, FALSE);

    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
3224
3225 /* Called from main thread */
3226 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3227 pa_sink_assert_ref(s);
3228 pa_assert_ctl_context();
3229
3230 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3231 pa_assert(latency == 0);
3232 return;
3233 }
3234
3235 if (latency < ABSOLUTE_MIN_LATENCY)
3236 latency = ABSOLUTE_MIN_LATENCY;
3237
3238 if (latency > ABSOLUTE_MAX_LATENCY)
3239 latency = ABSOLUTE_MAX_LATENCY;
3240
3241 if (PA_SINK_IS_LINKED(s->state))
3242 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3243 else
3244 s->thread_info.fixed_latency = latency;
3245
3246 pa_source_set_fixed_latency(s->monitor_source, latency);
3247 }
3248
3249 /* Called from main thread */
3250 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3251 pa_usec_t latency;
3252
3253 pa_sink_assert_ref(s);
3254 pa_assert_ctl_context();
3255
3256 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3257 return 0;
3258
3259 if (PA_SINK_IS_LINKED(s->state))
3260 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3261 else
3262 latency = s->thread_info.fixed_latency;
3263
3264 return latency;
3265 }
3266
/* Called from IO thread */
/* Applies a new fixed latency in the IO thread, notifies all inputs,
 * invalidates the cached requested latency, and mirrors the value onto the
 * monitor source. For dynamic-latency sinks the fixed latency is pinned to
 * 0. No-op if the value is unchanged. */
void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        s->thread_info.fixed_latency = 0;

        if (s->monitor_source)
            pa_source_set_fixed_latency_within_thread(s->monitor_source, 0);

        return;
    }

    /* The main-thread wrapper already clamped into the absolute window. */
    pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(latency <= ABSOLUTE_MAX_LATENCY);

    if (s->thread_info.fixed_latency == latency)
        return;

    s->thread_info.fixed_latency = latency;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        /* The callback is optional per input. */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_fixed_latency)
                i->update_sink_fixed_latency(i);
    }

    /* The cached requested latency was derived from the old fixed latency. */
    pa_sink_invalidate_requested_latency(s, FALSE);

    pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
}
3303
3304 /* Called from main context */
3305 void pa_sink_set_latency_offset(pa_sink *s, int64_t offset) {
3306 pa_sink_assert_ref(s);
3307
3308 s->latency_offset = offset;
3309
3310 if (PA_SINK_IS_LINKED(s->state))
3311 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3312 else
3313 s->thread_info.latency_offset = offset;
3314 }
3315
3316 /* Called from main context */
3317 size_t pa_sink_get_max_rewind(pa_sink *s) {
3318 size_t r;
3319 pa_assert_ctl_context();
3320 pa_sink_assert_ref(s);
3321
3322 if (!PA_SINK_IS_LINKED(s->state))
3323 return s->thread_info.max_rewind;
3324
3325 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3326
3327 return r;
3328 }
3329
3330 /* Called from main context */
3331 size_t pa_sink_get_max_request(pa_sink *s) {
3332 size_t r;
3333 pa_sink_assert_ref(s);
3334 pa_assert_ctl_context();
3335
3336 if (!PA_SINK_IS_LINKED(s->state))
3337 return s->thread_info.max_request;
3338
3339 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3340
3341 return r;
3342 }
3343
3344 /* Called from main context */
3345 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
3346 pa_device_port *port;
3347 int ret;
3348
3349 pa_sink_assert_ref(s);
3350 pa_assert_ctl_context();
3351
3352 if (!s->set_port) {
3353 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
3354 return -PA_ERR_NOTIMPLEMENTED;
3355 }
3356
3357 if (!name)
3358 return -PA_ERR_NOENTITY;
3359
3360 if (!(port = pa_hashmap_get(s->ports, name)))
3361 return -PA_ERR_NOENTITY;
3362
3363 if (s->active_port == port) {
3364 s->save_port = s->save_port || save;
3365 return 0;
3366 }
3367
3368 if (s->flags & PA_SINK_DEFERRED_VOLUME) {
3369 struct sink_message_set_port msg = { .port = port, .ret = 0 };
3370 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
3371 ret = msg.ret;
3372 }
3373 else
3374 ret = s->set_port(s, port);
3375
3376 if (ret < 0)
3377 return -PA_ERR_NOENTITY;
3378
3379 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
3380
3381 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
3382
3383 s->active_port = port;
3384 s->save_port = save;
3385
3386 pa_sink_set_latency_offset(s, s->active_port->latency_offset);
3387
3388 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
3389
3390 return 0;
3391 }
3392
/* Fills in PA_PROP_DEVICE_ICON_NAME from the device's form factor, class,
 * profile and bus properties, unless an icon name is already set. 'is_sink'
 * selects the fallback icon (card vs. microphone). Always returns TRUE. */
pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
    const char *ff, *c, *t = NULL, *s = "", *profile, *bus;

    pa_assert(p);

    /* Respect an explicitly configured icon. */
    if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
        return TRUE;

    /* First choice: map the form factor to a base icon name. */
    if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {

        if (pa_streq(ff, "microphone"))
            t = "audio-input-microphone";
        else if (pa_streq(ff, "webcam"))
            t = "camera-web";
        else if (pa_streq(ff, "computer"))
            t = "computer";
        else if (pa_streq(ff, "handset"))
            t = "phone";
        else if (pa_streq(ff, "portable"))
            t = "multimedia-player";
        else if (pa_streq(ff, "tv"))
            t = "video-display";

        /*
         * The following icon names are not part of the freedesktop icon
         * naming spec, but are in wide use anyway:
         *
         * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
         */
        else if (pa_streq(ff, "headset"))
            t = "audio-headset";
        else if (pa_streq(ff, "headphone"))
            t = "audio-headphones";
        else if (pa_streq(ff, "speaker"))
            t = "audio-speakers";
        else if (pa_streq(ff, "hands-free"))
            t = "audio-handsfree";
    }

    /* Second choice: the device class. */
    if (!t)
        if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
            if (pa_streq(c, "modem"))
                t = "modem";

    /* Fallback: generic card/microphone icon. */
    if (!t) {
        if (is_sink)
            t = "audio-card";
        else
            t = "audio-input-microphone";
    }

    /* Append a profile suffix (analog/iec958/hdmi) when recognizable. */
    if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
        if (strstr(profile, "analog"))
            s = "-analog";
        else if (strstr(profile, "iec958"))
            s = "-iec958";
        else if (strstr(profile, "hdmi"))
            s = "-hdmi";
    }

    bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);

    /* Final name: <base><profile-suffix>[-<bus>] */
    pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));

    return TRUE;
}
3459
3460 pa_bool_t pa_device_init_description(pa_proplist *p) {
3461 const char *s, *d = NULL, *k;
3462 pa_assert(p);
3463
3464 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3465 return TRUE;
3466
3467 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3468 if (pa_streq(s, "internal"))
3469 d = _("Built-in Audio");
3470
3471 if (!d)
3472 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3473 if (pa_streq(s, "modem"))
3474 d = _("Modem");
3475
3476 if (!d)
3477 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3478
3479 if (!d)
3480 return FALSE;
3481
3482 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3483
3484 if (d && k)
3485 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3486 else if (d)
3487 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3488
3489 return TRUE;
3490 }
3491
3492 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
3493 const char *s;
3494 pa_assert(p);
3495
3496 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3497 return TRUE;
3498
3499 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3500 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3501 || pa_streq(s, "headset")) {
3502 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3503 return TRUE;
3504 }
3505
3506 return FALSE;
3507 }
3508
3509 unsigned pa_device_init_priority(pa_proplist *p) {
3510 const char *s;
3511 unsigned priority = 0;
3512
3513 pa_assert(p);
3514
3515 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3516
3517 if (pa_streq(s, "sound"))
3518 priority += 9000;
3519 else if (!pa_streq(s, "modem"))
3520 priority += 1000;
3521 }
3522
3523 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3524
3525 if (pa_streq(s, "internal"))
3526 priority += 900;
3527 else if (pa_streq(s, "speaker"))
3528 priority += 500;
3529 else if (pa_streq(s, "headphone"))
3530 priority += 400;
3531 }
3532
3533 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3534
3535 if (pa_streq(s, "pci"))
3536 priority += 50;
3537 else if (pa_streq(s, "usb"))
3538 priority += 40;
3539 else if (pa_streq(s, "bluetooth"))
3540 priority += 30;
3541 }
3542
3543 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3544
3545 if (pa_startswith(s, "analog-"))
3546 priority += 9;
3547 else if (pa_startswith(s, "iec958-"))
3548 priority += 8;
3549 }
3550
3551 return priority;
3552 }
3553
/* Lock-free free list so volume-change entries can be recycled without
 * allocating in the IO thread. */
PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);

/* Called from the IO thread. */
/* Allocates (or recycles) a volume-change entry, initialized with zero time
 * and a reset hw_volume matching the sink's channel count. */
static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
    pa_sink_volume_change *c;
    /* Prefer a recycled entry from the free list over a fresh allocation. */
    if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
        c = pa_xnew(pa_sink_volume_change, 1);

    PA_LLIST_INIT(pa_sink_volume_change, c);
    c->at = 0;
    pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
    return c;
}
3567
/* Called from the IO thread. */
/* Returns a volume-change entry to the free list, falling back to a real
 * free when the list is full. */
static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
    pa_assert(c);
    if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
        pa_xfree(c);
}
3574
/* Called from the IO thread. */
/* Queues a deferred hardware volume change so it takes effect when the
 * currently buffered audio is actually heard. The target time is "now +
 * sink latency", biased by a safety margin: increases are applied a bit
 * late, decreases a bit early, so the transition never makes buffered audio
 * momentarily too loud. Queued changes that would fire later than this new
 * one are dropped, since this change supersedes them. */
void pa_sink_volume_change_push(pa_sink *s) {
    pa_sink_volume_change *c = NULL;
    pa_sink_volume_change *nc = NULL;
    uint32_t safety_margin = s->thread_info.volume_change_safety_margin;

    const char *direction = NULL;

    pa_assert(s);
    nc = pa_sink_volume_change_new(s);

    /* NOTE: There is already more different volumes in pa_sink that I can remember.
     * Adding one more volume for HW would get us rid of this, but I am trying
     * to survive with the ones we already have. */
    pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);

    /* Nothing queued and HW volume already matches: nothing to do. */
    if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
        pa_log_debug("Volume not changing");
        pa_sink_volume_change_free(nc);
        return;
    }

    /* Schedule for when currently buffered audio will be played. */
    nc->at = pa_sink_get_latency_within_thread(s);
    nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    /* Walk the queue from the tail to find the insertion point, applying the
     * up-late/down-early bias relative to the neighboring change. */
    if (s->thread_info.volume_changes_tail) {
        for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
            /* If volume is going up let's do it a bit late. If it is going
             * down let's do it a bit early. */
            if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
                if (nc->at + safety_margin > c->at) {
                    nc->at += safety_margin;
                    direction = "up";
                    break;
                }
            }
            else if (nc->at - safety_margin > c->at) {
                nc->at -= safety_margin;
                direction = "down";
                break;
            }
        }
    }

    if (c == NULL) {
        /* Queue empty or this change precedes everything queued: compare
         * against the current HW volume instead and prepend. */
        if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
            nc->at += safety_margin;
            direction = "up";
        } else {
            nc->at -= safety_margin;
            direction = "down";
        }
        PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
    }
    else {
        PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
    }

    pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);

    /* We can ignore volume events that came earlier but should happen later than this. */
    PA_LLIST_FOREACH(c, nc->next) {
        pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
        pa_sink_volume_change_free(c);
    }
    nc->next = NULL;
    s->thread_info.volume_changes_tail = nc;
}
3643
3644 /* Called from the IO thread. */
3645 static void pa_sink_volume_change_flush(pa_sink *s) {
3646 pa_sink_volume_change *c = s->thread_info.volume_changes;
3647 pa_assert(s);
3648 s->thread_info.volume_changes = NULL;
3649 s->thread_info.volume_changes_tail = NULL;
3650 while (c) {
3651 pa_sink_volume_change *next = c->next;
3652 pa_sink_volume_change_free(c);
3653 c = next;
3654 }
3655 }
3656
/* Called from the IO thread. */
/* Applies every queued volume change whose time has come, writing the newest
 * one to the hardware via s->write_volume. Returns TRUE if a volume was
 * written. If usec_to_next is non-NULL it receives the delay until the next
 * pending change, or 0 if none is pending. */
pa_bool_t pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
    pa_usec_t now;
    pa_bool_t ret = FALSE;

    pa_assert(s);

    /* NOTE(review): this reads s->state rather than s->thread_info.state,
     * unlike the other IO-thread code in this file -- confirm intended. */
    if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
        if (usec_to_next)
            *usec_to_next = 0;
        return ret;
    }

    /* Deferred volume only makes sense with a write_volume callback. */
    pa_assert(s->write_volume);

    now = pa_rtclock_now();

    /* Pop every change that is due; only the last one's volume survives. */
    while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
        pa_sink_volume_change *c = s->thread_info.volume_changes;
        PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
        pa_log_debug("Volume change to %d at %llu was written %llu usec late",
                     pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
        ret = TRUE;
        s->thread_info.current_hw_volume = c->hw_volume;
        pa_sink_volume_change_free(c);
    }

    if (ret)
        s->write_volume(s);

    if (s->thread_info.volume_changes) {
        if (usec_to_next)
            *usec_to_next = s->thread_info.volume_changes->at - now;
        if (pa_log_ratelimit(PA_LOG_DEBUG))
            pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
    }
    else {
        if (usec_to_next)
            *usec_to_next = 0;
        /* Queue drained: the tail pointer must be cleared too. */
        s->thread_info.volume_changes_tail = NULL;
    }
    return ret;
}
3700
/* Called from the IO thread. */
/* Compensates queued volume changes for a rewind of nbytes: the rewound
 * audio will be heard sooner, so pending changes scheduled beyond the
 * current latency horizon are shifted earlier by the rewound duration,
 * clamped so they never cross the (safety-margin-adjusted) horizon. */
static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
    /* All the queued volume events later than current latency are shifted to happen earlier. */
    pa_sink_volume_change *c;
    pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
    pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
    pa_usec_t limit = pa_sink_get_latency_within_thread(s);

    pa_log_debug("latency = %lld", (long long) limit);
    limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
        pa_usec_t modified_limit = limit;
        /* Same up-late/down-early bias as in pa_sink_volume_change_push(). */
        if (prev_vol > pa_cvolume_avg(&c->hw_volume))
            modified_limit -= s->thread_info.volume_change_safety_margin;
        else
            modified_limit += s->thread_info.volume_change_safety_margin;
        if (c->at > modified_limit) {
            c->at -= rewound;
            if (c->at < modified_limit)
                c->at = modified_limit;
        }
        prev_vol = pa_cvolume_avg(&c->hw_volume);
    }
    /* Some changes may have become due by the shift; apply them now. */
    pa_sink_volume_change_apply(s, NULL);
}
3727
3728 /* Called from the main thread */
3729 /* Gets the list of formats supported by the sink. The members and idxset must
3730 * be freed by the caller. */
3731 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3732 pa_idxset *ret;
3733
3734 pa_assert(s);
3735
3736 if (s->get_formats) {
3737 /* Sink supports format query, all is good */
3738 ret = s->get_formats(s);
3739 } else {
3740 /* Sink doesn't support format query, so assume it does PCM */
3741 pa_format_info *f = pa_format_info_new();
3742 f->encoding = PA_ENCODING_PCM;
3743
3744 ret = pa_idxset_new(NULL, NULL);
3745 pa_idxset_put(ret, f, NULL);
3746 }
3747
3748 return ret;
3749 }
3750
3751 /* Called from the main thread */
3752 /* Allows an external source to set what formats a sink supports if the sink
3753 * permits this. The function makes a copy of the formats on success. */
3754 pa_bool_t pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3755 pa_assert(s);
3756 pa_assert(formats);
3757
3758 if (s->set_formats)
3759 /* Sink supports setting formats -- let's give it a shot */
3760 return s->set_formats(s, formats);
3761 else
3762 /* Sink doesn't support setting this -- bail out */
3763 return FALSE;
3764 }
3765
3766 /* Called from the main thread */
3767 /* Checks if the sink can accept this format */
3768 pa_bool_t pa_sink_check_format(pa_sink *s, pa_format_info *f)
3769 {
3770 pa_idxset *formats = NULL;
3771 pa_bool_t ret = FALSE;
3772
3773 pa_assert(s);
3774 pa_assert(f);
3775
3776 formats = pa_sink_get_formats(s);
3777
3778 if (formats) {
3779 pa_format_info *finfo_device;
3780 uint32_t i;
3781
3782 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3783 if (pa_format_info_is_compatible(finfo_device, f)) {
3784 ret = TRUE;
3785 break;
3786 }
3787 }
3788
3789 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
3790 }
3791
3792 return ret;
3793 }
3794
3795 /* Called from the main thread */
3796 /* Calculates the intersection between formats supported by the sink and
3797 * in_formats, and returns these, in the order of the sink's formats. */
3798 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3799 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3800 pa_format_info *f_sink, *f_in;
3801 uint32_t i, j;
3802
3803 pa_assert(s);
3804
3805 if (!in_formats || pa_idxset_isempty(in_formats))
3806 goto done;
3807
3808 sink_formats = pa_sink_get_formats(s);
3809
3810 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3811 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3812 if (pa_format_info_is_compatible(f_sink, f_in))
3813 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3814 }
3815 }
3816
3817 done:
3818 if (sink_formats)
3819 pa_idxset_free(sink_formats, (pa_free_cb_t) pa_format_info_free);
3820
3821 return out_formats;
3822 }