]> code.delx.au - pulseaudio/blob - src/pulsecore/sink.c
device-port: Remove pa_device_port_hashmap_free()
[pulseaudio] / src / pulsecore / sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29 #include <string.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/format.h>
33 #include <pulse/utf8.h>
34 #include <pulse/xmalloc.h>
35 #include <pulse/timeval.h>
36 #include <pulse/util.h>
37 #include <pulse/rtclock.h>
38 #include <pulse/internal.h>
39
40 #include <pulsecore/i18n.h>
41 #include <pulsecore/sink-input.h>
42 #include <pulsecore/namereg.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/mix.h>
46 #include <pulsecore/core-subscribe.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/play-memblockq.h>
50 #include <pulsecore/flist.h>
51
52 #include "sink.h"
53
54 #define MAX_MIX_CHANNELS 32
55 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
56 #define ABSOLUTE_MIN_LATENCY (500)
57 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
58 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
59
60 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
61
/* One queued hardware volume change, linked into
 * s->thread_info.volume_changes (see pa_sink_volume_change_push/_flush/_rewind
 * declared below; used with PA_SINK_DEFERRED_VOLUME). */
struct pa_sink_volume_change {
    pa_usec_t at;          /* timestamp — presumably when the change shall be applied; confirm in the push/apply code */
    pa_cvolume hw_volume;  /* the hardware volume associated with this entry */

    PA_LLIST_FIELDS(pa_sink_volume_change);
};
68
/* Payload for PA_SINK_MESSAGE_SET_PORT, sent synchronously to the IO thread
 * when the sink uses deferred volume (see pa_sink_suspend()). */
struct sink_message_set_port {
    pa_device_port *port;  /* port to activate */
    int ret;               /* result code — initialized to 0 by the sender; set by the message handler (not visible here) */
};
73
74 static void sink_free(pa_object *s);
75
76 static void pa_sink_volume_change_push(pa_sink *s);
77 static void pa_sink_volume_change_flush(pa_sink *s);
78 static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
79
80 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
81 pa_assert(data);
82
83 pa_zero(*data);
84 data->proplist = pa_proplist_new();
85 data->ports = pa_hashmap_new(pa_idxset_string_hash_func, pa_idxset_string_compare_func);
86
87 return data;
88 }
89
90 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
91 pa_assert(data);
92
93 pa_xfree(data->name);
94 data->name = pa_xstrdup(name);
95 }
96
97 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
98 pa_assert(data);
99
100 if ((data->sample_spec_is_set = !!spec))
101 data->sample_spec = *spec;
102 }
103
104 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
105 pa_assert(data);
106
107 if ((data->channel_map_is_set = !!map))
108 data->channel_map = *map;
109 }
110
111 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data *data, const uint32_t alternate_sample_rate) {
112 pa_assert(data);
113
114 data->alternate_sample_rate_is_set = TRUE;
115 data->alternate_sample_rate = alternate_sample_rate;
116 }
117
118 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
119 pa_assert(data);
120
121 if ((data->volume_is_set = !!volume))
122 data->volume = *volume;
123 }
124
125 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
126 pa_assert(data);
127
128 data->muted_is_set = TRUE;
129 data->muted = !!mute;
130 }
131
132 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
133 pa_assert(data);
134
135 pa_xfree(data->active_port);
136 data->active_port = pa_xstrdup(port);
137 }
138
139 void pa_sink_new_data_done(pa_sink_new_data *data) {
140 pa_assert(data);
141
142 pa_proplist_free(data->proplist);
143
144 if (data->ports)
145 pa_hashmap_free(data->ports, (pa_free_cb_t) pa_device_port_unref);
146
147 pa_xfree(data->name);
148 pa_xfree(data->active_port);
149 }
150
151
/* Called from main context.
 * Clear every implementor-provided callback on the sink. Used while
 * initializing a new sink (pa_sink_new()) and when tearing one down
 * (pa_sink_unlink()). */
static void reset_callbacks(pa_sink *s) {
    pa_assert(s);

    s->set_state = NULL;
    s->get_volume = NULL;
    s->set_volume = NULL;
    s->write_volume = NULL;
    s->get_mute = NULL;
    s->set_mute = NULL;
    s->request_rewind = NULL;
    s->update_requested_latency = NULL;
    s->set_port = NULL;
    s->get_formats = NULL;
    s->set_formats = NULL;
    s->update_rate = NULL;
}
169
/* Called from main context.
 * Create a new sink from the parameters collected in 'data' and register it
 * with the core. Also creates the sink's monitor source ("<name>.monitor").
 * Returns NULL if name registration, a hook, or parameter validation fails.
 * The returned sink is in PA_SINK_INIT state; the implementor must still set
 * the asyncmsgq and call pa_sink_put() to activate it. Note that this
 * function steals data->ports. */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* Register the name first; the registry may return a modified name. */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    /* Give modules a chance to veto or modify the new sink. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Derive a default channel map if the caller did not provide one. */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    /* FIXME: There should probably be a general function for checking whether
     * the sink volume is allowed to be set, like there is for sink inputs. */
    pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    if (!data->volume_is_set) {
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);
        data->save_volume = FALSE;
    }

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = FALSE;

    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to adjust the now-fixated parameters. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = data->suspend_cause;
    pa_sink_set_mixer_dirty(s, FALSE);
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;
    s->default_sample_rate = s->sample_spec.rate;

    if (data->alternate_sample_rate_is_set)
        s->alternate_sample_rate = data->alternate_sample_rate;
    else
        s->alternate_sample_rate = s->core->alternate_sample_rate;

    /* An alternate rate equal to the default is pointless; disable it. */
    if (s->sample_spec.rate == s->alternate_sample_rate) {
        pa_log_warn("Default and alternate sample rates are the same.");
        s->alternate_sample_rate = 0;
    }

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;
    s->input_to_master = NULL;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    /* Prefer the explicitly requested port, if it exists... */
    if (data->active_port)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* ...otherwise fall back to the highest-priority port, if any. */
    if (!s->active_port) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    if (s->active_port)
        s->latency_offset = s->active_port->latency_offset;
    else
        s->latency_offset = 0;

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* Initialize the IO-thread-side mirror of the sink state. */
    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
    s->thread_info.volume_changes_tail = NULL;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
    s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
    s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
    s->thread_info.latency_offset = s->latency_offset;

    /* FIXME: This should probably be moved to pa_sink_put() */
    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Every sink gets a monitor source mirroring its output. */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    pa_source_new_data_set_alternate_sample_rate(&source_data, s->alternate_sample_rate);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    /* The monitor inherits only the latency-related flags. */
    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
398
/* Called from main context.
 * Transition the sink to 'state', informing both the sink implementor
 * (s->set_state) and the IO thread (via a synchronous message). Returns 0 on
 * success or a negative error code; on IO-thread failure the implementor-side
 * change is rolled back. Fires state-changed hooks/events and notifies sink
 * inputs on suspend/resume transitions. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* Only transitions between an opened state and SUSPENDED count as
     * suspend changes. */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    /* Mirror the new state into the IO thread, synchronously. */
    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* Undo the implementor-side change so both sides stay in sync. */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
456
457 void pa_sink_set_get_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
458 pa_assert(s);
459
460 s->get_volume = cb;
461 }
462
463 void pa_sink_set_set_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
464 pa_sink_flags_t flags;
465
466 pa_assert(s);
467 pa_assert(!s->write_volume || cb);
468
469 s->set_volume = cb;
470
471 /* Save the current flags so we can tell if they've changed */
472 flags = s->flags;
473
474 if (cb) {
475 /* The sink implementor is responsible for setting decibel volume support */
476 s->flags |= PA_SINK_HW_VOLUME_CTRL;
477 } else {
478 s->flags &= ~PA_SINK_HW_VOLUME_CTRL;
479 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
480 pa_sink_enable_decibel_volume(s, !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
481 }
482
483 /* If the flags have changed after init, let any clients know via a change event */
484 if (s->state != PA_SINK_INIT && flags != s->flags)
485 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
486 }
487
488 void pa_sink_set_write_volume_callback(pa_sink *s, pa_sink_cb_t cb) {
489 pa_sink_flags_t flags;
490
491 pa_assert(s);
492 pa_assert(!cb || s->set_volume);
493
494 s->write_volume = cb;
495
496 /* Save the current flags so we can tell if they've changed */
497 flags = s->flags;
498
499 if (cb)
500 s->flags |= PA_SINK_DEFERRED_VOLUME;
501 else
502 s->flags &= ~PA_SINK_DEFERRED_VOLUME;
503
504 /* If the flags have changed after init, let any clients know via a change event */
505 if (s->state != PA_SINK_INIT && flags != s->flags)
506 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
507 }
508
509 void pa_sink_set_get_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
510 pa_assert(s);
511
512 s->get_mute = cb;
513 }
514
515 void pa_sink_set_set_mute_callback(pa_sink *s, pa_sink_cb_t cb) {
516 pa_sink_flags_t flags;
517
518 pa_assert(s);
519
520 s->set_mute = cb;
521
522 /* Save the current flags so we can tell if they've changed */
523 flags = s->flags;
524
525 if (cb)
526 s->flags |= PA_SINK_HW_MUTE_CTRL;
527 else
528 s->flags &= ~PA_SINK_HW_MUTE_CTRL;
529
530 /* If the flags have changed after init, let any clients know via a change event */
531 if (s->state != PA_SINK_INIT && flags != s->flags)
532 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
533 }
534
535 static void enable_flat_volume(pa_sink *s, pa_bool_t enable) {
536 pa_sink_flags_t flags;
537
538 pa_assert(s);
539
540 /* Always follow the overall user preference here */
541 enable = enable && s->core->flat_volumes;
542
543 /* Save the current flags so we can tell if they've changed */
544 flags = s->flags;
545
546 if (enable)
547 s->flags |= PA_SINK_FLAT_VOLUME;
548 else
549 s->flags &= ~PA_SINK_FLAT_VOLUME;
550
551 /* If the flags have changed after init, let any clients know via a change event */
552 if (s->state != PA_SINK_INIT && flags != s->flags)
553 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
554 }
555
556 void pa_sink_enable_decibel_volume(pa_sink *s, pa_bool_t enable) {
557 pa_sink_flags_t flags;
558
559 pa_assert(s);
560
561 /* Save the current flags so we can tell if they've changed */
562 flags = s->flags;
563
564 if (enable) {
565 s->flags |= PA_SINK_DECIBEL_VOLUME;
566 enable_flat_volume(s, TRUE);
567 } else {
568 s->flags &= ~PA_SINK_DECIBEL_VOLUME;
569 enable_flat_volume(s, FALSE);
570 }
571
572 /* If the flags have changed after init, let any clients know via a change event */
573 if (s->state != PA_SINK_INIT && flags != s->flags)
574 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
575 }
576
/* Called from main context.
 * Activate a sink created with pa_sink_new(): verify that the implementor
 * set everything up consistently (asyncmsgq, latency range, flag/callback
 * pairing), finalize the volume/flag configuration, move the sink from INIT
 * to SUSPENDED or IDLE, activate the monitor source, and announce the new
 * sink via subscription event and hook. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || s->input_to_master);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow some volume related flags to be set
     * between _new() and _put() by the callback setter functions above.
     *
     * Thus we implement a couple safeguards here which ensure the above
     * setters were used (or at least the implementor made manual changes
     * in a compatible way).
     *
     * Note: All of these flags set here can change over the life time
     * of the sink. */
    pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
    pa_assert(!(s->flags & PA_SINK_DEFERRED_VOLUME) || s->write_volume);
    pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);

    /* XXX: Currently decibel volume is disabled for all sinks that use volume
     * sharing. When the master sink supports decibel volume, it would be good
     * to have the flag also in the filter sink, but currently we don't do that
     * so that the flags of the filter sink never change when it's moved from
     * a master sink to another. One solution for this problem would be to
     * remove user-visible volume altogether from filter sinks when volume
     * sharing is used, but the current approach was easier to implement... */
    /* We always support decibel volumes in software, otherwise we leave it to
     * the sink implementor to set this flag as needed.
     *
     * Note: This flag can also change over the life time of the sink. */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        pa_sink_enable_decibel_volume(s, TRUE);

    /* If the sink implementor support DB volumes by itself, we should always
     * try and enable flat volumes too */
    if ((s->flags & PA_SINK_DECIBEL_VOLUME))
        enable_flat_volume(s, TRUE);

    if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
        pa_sink *root_sink = pa_sink_get_master(s);

        pa_assert(root_sink);

        /* Filter sinks sharing volume adopt the master's volumes, remapped
         * to this sink's channel map. */
        s->reference_volume = root_sink->reference_volume;
        pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);

        s->real_volume = root_sink->real_volume;
        pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
    } else
        /* We assume that if the sink implementor changed the default
         * volume he did so in real_volume, because that is the usual
         * place where he is supposed to place his changes. */
        s->reference_volume = s->real_volume;

    /* Sync the IO-thread-side volume/mute mirrors. */
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);

    /* Sanity-check that the flag combinations and the monitor source are
     * mutually consistent. */
    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
              || (s->base_volume == PA_VOLUME_NORM
                  && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    /* Enter the initial state: suspended if a cause is pending, idle
     * otherwise. */
    if (s->suspend_cause)
        pa_assert_se(sink_set_state(s, PA_SINK_SUSPENDED) == 0);
    else
        pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
664
/* Called from main context.
 * Disconnect the sink from the core: unregister its name, remove it from the
 * core and card sink sets, kill all of its inputs, move it to UNLINKED state,
 * clear implementor callbacks and unlink the monitor source. Fires the
 * UNLINK hook before and the REMOVE event/UNLINK_POST hook after, but only
 * if the sink was actually linked. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Kill all inputs; the j != i assertion guards against an input that
     * fails to remove itself from s->inputs, which would loop forever. */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
714
/* Called from main context.
 * Destructor installed via s->parent.parent.free in pa_sink_new(); runs when
 * the last reference is dropped. Unlinks the sink if still linked, then
 * releases the monitor source, input containers, silence memblock, strings,
 * proplist, ports and finally the sink object itself. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    /* Make sure we are detached from the core before tearing down. */
    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    /* s->inputs holds no references of its own; thread_info.inputs does. */
    pa_idxset_free(s->inputs, NULL, NULL);
    pa_hashmap_free(s->thread_info.inputs, (pa_free_cb_t) pa_sink_input_unref);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    /* The port hashmap was stolen from the new_data in pa_sink_new(). */
    if (s->ports)
        pa_hashmap_free(s->ports, (pa_free_cb_t) pa_device_port_unref);

    pa_xfree(s);
}
750
751 /* Called from main context, and not while the IO thread is active, please */
752 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
753 pa_sink_assert_ref(s);
754 pa_assert_ctl_context();
755
756 s->asyncmsgq = q;
757
758 if (s->monitor_source)
759 pa_source_set_asyncmsgq(s->monitor_source, q);
760 }
761
762 /* Called from main context, and not while the IO thread is active, please */
763 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
764 pa_sink_assert_ref(s);
765 pa_assert_ctl_context();
766
767 if (mask == 0)
768 return;
769
770 /* For now, allow only a minimal set of flags to be changed. */
771 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
772
773 s->flags = (s->flags & ~mask) | (value & mask);
774
775 pa_source_update_flags(s->monitor_source,
776 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
777 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
778 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
779 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
780 }
781
782 /* Called from IO context, or before _put() from main context */
783 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
784 pa_sink_assert_ref(s);
785 pa_sink_assert_io_context(s);
786
787 s->thread_info.rtpoll = p;
788
789 if (s->monitor_source)
790 pa_source_set_rtpoll(s->monitor_source, p);
791 }
792
793 /* Called from main context */
794 int pa_sink_update_status(pa_sink*s) {
795 pa_sink_assert_ref(s);
796 pa_assert_ctl_context();
797 pa_assert(PA_SINK_IS_LINKED(s->state));
798
799 if (s->state == PA_SINK_SUSPENDED)
800 return 0;
801
802 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
803 }
804
805 /* Called from any context - must be threadsafe */
806 void pa_sink_set_mixer_dirty(pa_sink *s, pa_bool_t is_dirty)
807 {
808 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
809 }
810
/* Called from main context.
 * Add or remove 'cause' from the sink's (and its monitor's) suspend cause
 * set and transition the sink state accordingly. If the mixer was marked
 * dirty while inaccessible and the session suspend is gone, re-apply the
 * port or mute/volume settings. Returns 0 if the state did not need to
 * change, otherwise sink_set_state()'s result. */
int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (suspend) {
        s->suspend_cause |= cause;
        s->monitor_source->suspend_cause |= cause;
    } else {
        s->suspend_cause &= ~cause;
        s->monitor_source->suspend_cause &= ~cause;
    }

    if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
        /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
           it'll be handled just fine. */
        pa_sink_set_mixer_dirty(s, FALSE);
        pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
        if (s->active_port && s->set_port) {
            if (s->flags & PA_SINK_DEFERRED_VOLUME) {
                /* With deferred volume the port must be set from the IO
                 * thread, so send a synchronous message instead of calling
                 * the callback directly. */
                struct sink_message_set_port msg = { .port = s->active_port, .ret = 0 };
                pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
            }
            else
                s->set_port(s, s->active_port);
        }
        else {
            /* No port to restore; push the cached mute/volume instead. */
            if (s->set_mute)
                s->set_mute(s);
            if (s->set_volume)
                s->set_volume(s);
        }
    }

    /* Nothing to do if the state already matches the suspend causes. */
    if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return sink_set_state(s, PA_SINK_SUSPENDED);
    else
        return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
857
858 /* Called from main context */
859 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
860 pa_sink_input *i, *n;
861 uint32_t idx;
862
863 pa_sink_assert_ref(s);
864 pa_assert_ctl_context();
865 pa_assert(PA_SINK_IS_LINKED(s->state));
866
867 if (!q)
868 q = pa_queue_new();
869
870 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
871 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
872
873 pa_sink_input_ref(i);
874
875 if (pa_sink_input_start_move(i) >= 0)
876 pa_queue_push(q, i);
877 else
878 pa_sink_input_unref(i);
879 }
880
881 return q;
882 }
883
884 /* Called from main context */
885 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
886 pa_sink_input *i;
887
888 pa_sink_assert_ref(s);
889 pa_assert_ctl_context();
890 pa_assert(PA_SINK_IS_LINKED(s->state));
891 pa_assert(q);
892
893 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
894 if (pa_sink_input_finish_move(i, s, save) < 0)
895 pa_sink_input_fail_move(i);
896
897 pa_sink_input_unref(i);
898 }
899
900 pa_queue_free(q, NULL);
901 }
902
903 /* Called from main context */
904 void pa_sink_move_all_fail(pa_queue *q) {
905 pa_sink_input *i;
906
907 pa_assert_ctl_context();
908 pa_assert(q);
909
910 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
911 pa_sink_input_fail_move(i);
912 pa_sink_input_unref(i);
913 }
914
915 pa_queue_free(q, NULL);
916 }
917
/* Called from IO thread context.
 * Execute a rewind of 'nbytes' bytes: clear the pending rewind request,
 * rewind the deferred hardware volume queue if needed, propagate the rewind
 * to every input and finally to the monitor source. A zero-byte call is
 * still forwarded to the inputs when a rewind was requested. */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* If nobody requested this and this is actually no real rewind
     * then we can short cut this. Please note that this means that
     * not all rewind requests triggered upstream will always be
     * translated in actual requests! */
    if (!s->thread_info.rewind_requested && nbytes <= 0)
        return;

    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;

    if (nbytes > 0) {
        pa_log_debug("Processing rewind...");
        /* Deferred hardware volume changes have to be rewound along with
         * the audio data. */
        if (s->flags & PA_SINK_DEFERRED_VOLUME)
            pa_sink_volume_change_rewind(s, nbytes);
    }

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_process_rewind(i, nbytes);
    }

    if (nbytes > 0) {
        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
            pa_source_process_rewind(s->monitor_source, nbytes);
    }
}
953
/* Called from IO thread context.
 * Peek one chunk from each sink input (up to 'maxinfo' entries) into the
 * 'info' array for mixing. Chunks that are pure silence are unreffed and
 * skipped. *length is shrunk to the shortest chunk seen so all entries can
 * be mixed over the same span. Each stored entry holds a reference to its
 * input in info->userdata (released later by inputs_drop()). Returns the
 * number of entries filled in. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        /* Track the shortest chunk length returned by any input. */
        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Silent chunks need not be mixed; drop them here. */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
993
/* Called from IO thread context.
 *
 * After a render pass, advances every sink input by result->length bytes,
 * forwards per-input data to any direct outputs on the monitor source,
 * posts the mixed 'result' to the monitor source, and releases all
 * references held by the 'info' array (filled by fill_mix_info()). */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry in the pa_mix_info array.
         * 'p' persists across iterations and wraps around, so when the
         * hashmap order matches the array order each lookup is O(1). */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            /* Feed this input's own (pre-mix, volume-adjusted) data to any
             * source outputs that monitor it directly. */
            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                if (m && m->chunk.memblock) {
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    /* No mix entry (input was silent or not collected):
                     * post silence of the same length instead. */
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release the references fill_mix_info() took for this entry. */
        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
1090
1091 /* Called from IO thread context */
1092 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
1093 pa_mix_info info[MAX_MIX_CHANNELS];
1094 unsigned n;
1095 size_t block_size_max;
1096
1097 pa_sink_assert_ref(s);
1098 pa_sink_assert_io_context(s);
1099 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1100 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1101 pa_assert(result);
1102
1103 pa_assert(!s->thread_info.rewind_requested);
1104 pa_assert(s->thread_info.rewind_nbytes == 0);
1105
1106 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1107 result->memblock = pa_memblock_ref(s->silence.memblock);
1108 result->index = s->silence.index;
1109 result->length = PA_MIN(s->silence.length, length);
1110 return;
1111 }
1112
1113 pa_sink_ref(s);
1114
1115 if (length <= 0)
1116 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
1117
1118 block_size_max = pa_mempool_block_size_max(s->core->mempool);
1119 if (length > block_size_max)
1120 length = pa_frame_align(block_size_max, &s->sample_spec);
1121
1122 pa_assert(length > 0);
1123
1124 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
1125
1126 if (n == 0) {
1127
1128 *result = s->silence;
1129 pa_memblock_ref(result->memblock);
1130
1131 if (result->length > length)
1132 result->length = length;
1133
1134 } else if (n == 1) {
1135 pa_cvolume volume;
1136
1137 *result = info[0].chunk;
1138 pa_memblock_ref(result->memblock);
1139
1140 if (result->length > length)
1141 result->length = length;
1142
1143 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1144
1145 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1146 pa_memblock_unref(result->memblock);
1147 pa_silence_memchunk_get(&s->core->silence_cache,
1148 s->core->mempool,
1149 result,
1150 &s->sample_spec,
1151 result->length);
1152 } else if (!pa_cvolume_is_norm(&volume)) {
1153 pa_memchunk_make_writable(result, 0);
1154 pa_volume_memchunk(result, &s->sample_spec, &volume);
1155 }
1156 } else {
1157 void *ptr;
1158 result->memblock = pa_memblock_new(s->core->mempool, length);
1159
1160 ptr = pa_memblock_acquire(result->memblock);
1161 result->length = pa_mix(info, n,
1162 ptr, length,
1163 &s->sample_spec,
1164 &s->thread_info.soft_volume,
1165 s->thread_info.soft_muted);
1166 pa_memblock_release(result->memblock);
1167
1168 result->index = 0;
1169 }
1170
1171 inputs_drop(s, info, n, result);
1172
1173 pa_sink_unref(s);
1174 }
1175
/* Called from IO thread context.
 *
 * Renders mixed audio directly into the caller-supplied chunk 'target'
 * (which must be writable and non-empty). May shorten target->length if
 * less data was available; the caller is expected to loop (see
 * pa_sink_render_into_full()). While suspended, the target is silenced. */
void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t length, block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
        return;
    }

    pa_sink_ref(s);

    /* Never ask the inputs for more than one memblock can hold. */
    length = target->length;
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* No active inputs: write silence. */
        if (target->length > length)
            target->length = length;

        pa_silence_memchunk(target, &s->sample_spec);
    } else if (n == 1) {
        pa_cvolume volume;

        /* Single input: copy it in, applying volume only if needed. */
        if (target->length > length)
            target->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
            pa_silence_memchunk(target, &s->sample_spec);
        else {
            pa_memchunk vchunk;

            vchunk = info[0].chunk;
            pa_memblock_ref(vchunk.memblock);

            if (vchunk.length > length)
                vchunk.length = length;

            if (!pa_cvolume_is_norm(&volume)) {
                pa_memchunk_make_writable(&vchunk, 0);
                pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
            }

            pa_memchunk_memcpy(target, &vchunk);
            pa_memblock_unref(vchunk.memblock);
        }

    } else {
        void *ptr;

        /* Multiple inputs: mix straight into the target block. */
        ptr = pa_memblock_acquire(target->memblock);

        target->length = pa_mix(info, n,
                                (uint8_t*) ptr + target->index, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(target->memblock);
    }

    inputs_drop(s, info, n, target);

    pa_sink_unref(s);
}
1260
1261 /* Called from IO thread context */
1262 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1263 pa_memchunk chunk;
1264 size_t l, d;
1265
1266 pa_sink_assert_ref(s);
1267 pa_sink_assert_io_context(s);
1268 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1269 pa_assert(target);
1270 pa_assert(target->memblock);
1271 pa_assert(target->length > 0);
1272 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1273
1274 pa_assert(!s->thread_info.rewind_requested);
1275 pa_assert(s->thread_info.rewind_nbytes == 0);
1276
1277 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1278 pa_silence_memchunk(target, &s->sample_spec);
1279 return;
1280 }
1281
1282 pa_sink_ref(s);
1283
1284 l = target->length;
1285 d = 0;
1286 while (l > 0) {
1287 chunk = *target;
1288 chunk.index += d;
1289 chunk.length -= d;
1290
1291 pa_sink_render_into(s, &chunk);
1292
1293 d += chunk.length;
1294 l -= chunk.length;
1295 }
1296
1297 pa_sink_unref(s);
1298 }
1299
1300 /* Called from IO thread context */
1301 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1302 pa_sink_assert_ref(s);
1303 pa_sink_assert_io_context(s);
1304 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1305 pa_assert(length > 0);
1306 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1307 pa_assert(result);
1308
1309 pa_assert(!s->thread_info.rewind_requested);
1310 pa_assert(s->thread_info.rewind_nbytes == 0);
1311
1312 pa_sink_ref(s);
1313
1314 pa_sink_render(s, length, result);
1315
1316 if (result->length < length) {
1317 pa_memchunk chunk;
1318
1319 pa_memchunk_make_writable(result, length);
1320
1321 chunk.memblock = result->memblock;
1322 chunk.index = result->index + result->length;
1323 chunk.length = length - result->length;
1324
1325 pa_sink_render_into_full(s, &chunk);
1326
1327 result->length = length;
1328 }
1329
1330 pa_sink_unref(s);
1331 }
1332
1333 /* Called from main thread */
1334 pa_bool_t pa_sink_update_rate(pa_sink *s, uint32_t rate, pa_bool_t passthrough)
1335 {
1336 if (s->update_rate) {
1337 uint32_t desired_rate = rate;
1338 uint32_t default_rate = s->default_sample_rate;
1339 uint32_t alternate_rate = s->alternate_sample_rate;
1340 uint32_t idx;
1341 pa_sink_input *i;
1342 pa_bool_t use_alternate = FALSE;
1343
1344 if (PA_UNLIKELY(default_rate == alternate_rate)) {
1345 pa_log_warn("Default and alternate sample rates are the same.");
1346 return FALSE;
1347 }
1348
1349 if (PA_SINK_IS_RUNNING(s->state)) {
1350 pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
1351 s->sample_spec.rate);
1352 return FALSE;
1353 }
1354
1355 if (s->monitor_source) {
1356 if (PA_SOURCE_IS_RUNNING(s->monitor_source->state) == TRUE) {
1357 pa_log_info("Cannot update rate, monitor source is RUNNING");
1358 return FALSE;
1359 }
1360 }
1361
1362 if (PA_UNLIKELY (desired_rate < 8000 ||
1363 desired_rate > PA_RATE_MAX))
1364 return FALSE;
1365
1366 if (!passthrough) {
1367 pa_assert(default_rate % 4000 || default_rate % 11025);
1368 pa_assert(alternate_rate % 4000 || alternate_rate % 11025);
1369
1370 if (default_rate % 4000) {
1371 /* default is a 11025 multiple */
1372 if ((alternate_rate % 4000 == 0) && (desired_rate % 4000 == 0))
1373 use_alternate=TRUE;
1374 } else {
1375 /* default is 4000 multiple */
1376 if ((alternate_rate % 11025 == 0) && (desired_rate % 11025 == 0))
1377 use_alternate=TRUE;
1378 }
1379
1380 if (use_alternate)
1381 desired_rate = alternate_rate;
1382 else
1383 desired_rate = default_rate;
1384 } else {
1385 desired_rate = rate; /* use stream sampling rate, discard default/alternate settings */
1386 }
1387
1388 if (desired_rate == s->sample_spec.rate)
1389 return FALSE;
1390
1391 if (!passthrough && pa_sink_used_by(s) > 0)
1392 return FALSE;
1393
1394 pa_log_debug("Suspending sink %s due to changing the sample rate.", s->name);
1395 pa_sink_suspend(s, TRUE, PA_SUSPEND_IDLE); /* needed before rate update, will be resumed automatically */
1396
1397 if (s->update_rate(s, desired_rate) == TRUE) {
1398 /* update monitor source as well */
1399 if (s->monitor_source && !passthrough)
1400 pa_source_update_rate(s->monitor_source, desired_rate, FALSE);
1401 pa_log_info("Changed sampling rate successfully");
1402
1403 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1404 if (i->state == PA_SINK_INPUT_CORKED)
1405 pa_sink_input_update_rate(i);
1406 }
1407
1408 return TRUE;
1409 }
1410 }
1411 return FALSE;
1412 }
1413
1414 /* Called from main thread */
1415 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1416 pa_usec_t usec = 0;
1417
1418 pa_sink_assert_ref(s);
1419 pa_assert_ctl_context();
1420 pa_assert(PA_SINK_IS_LINKED(s->state));
1421
1422 /* The returned value is supposed to be in the time domain of the sound card! */
1423
1424 if (s->state == PA_SINK_SUSPENDED)
1425 return 0;
1426
1427 if (!(s->flags & PA_SINK_LATENCY))
1428 return 0;
1429
1430 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1431
1432 /* usec is unsigned, so check that the offset can be added to usec without
1433 * underflowing. */
1434 if (-s->latency_offset <= (int64_t) usec)
1435 usec += s->latency_offset;
1436 else
1437 usec = 0;
1438
1439 return usec;
1440 }
1441
1442 /* Called from IO thread */
1443 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1444 pa_usec_t usec = 0;
1445 pa_msgobject *o;
1446
1447 pa_sink_assert_ref(s);
1448 pa_sink_assert_io_context(s);
1449 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1450
1451 /* The returned value is supposed to be in the time domain of the sound card! */
1452
1453 if (s->thread_info.state == PA_SINK_SUSPENDED)
1454 return 0;
1455
1456 if (!(s->flags & PA_SINK_LATENCY))
1457 return 0;
1458
1459 o = PA_MSGOBJECT(s);
1460
1461 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1462
1463 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1464 return -1;
1465
1466 /* usec is unsigned, so check that the offset can be added to usec without
1467 * underflowing. */
1468 if (-s->thread_info.latency_offset <= (int64_t) usec)
1469 usec += s->thread_info.latency_offset;
1470 else
1471 usec = 0;
1472
1473 return usec;
1474 }
1475
1476 /* Called from the main thread (and also from the IO thread while the main
1477 * thread is waiting).
1478 *
1479 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1480 * set. Instead, flat volume mode is detected by checking whether the root sink
1481 * has the flag set. */
1482 pa_bool_t pa_sink_flat_volume_enabled(pa_sink *s) {
1483 pa_sink_assert_ref(s);
1484
1485 s = pa_sink_get_master(s);
1486
1487 if (PA_LIKELY(s))
1488 return (s->flags & PA_SINK_FLAT_VOLUME);
1489 else
1490 return FALSE;
1491 }
1492
1493 /* Called from the main thread (and also from the IO thread while the main
1494 * thread is waiting). */
1495 pa_sink *pa_sink_get_master(pa_sink *s) {
1496 pa_sink_assert_ref(s);
1497
1498 while (s && (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1499 if (PA_UNLIKELY(!s->input_to_master))
1500 return NULL;
1501
1502 s = s->input_to_master->sink;
1503 }
1504
1505 return s;
1506 }
1507
1508 /* Called from main context */
1509 pa_bool_t pa_sink_is_passthrough(pa_sink *s) {
1510 pa_sink_input *alt_i;
1511 uint32_t idx;
1512
1513 pa_sink_assert_ref(s);
1514
1515 /* one and only one PASSTHROUGH input can possibly be connected */
1516 if (pa_idxset_size(s->inputs) == 1) {
1517 alt_i = pa_idxset_first(s->inputs, &idx);
1518
1519 if (pa_sink_input_is_passthrough(alt_i))
1520 return TRUE;
1521 }
1522
1523 return FALSE;
1524 }
1525
1526 /* Called from main context */
1527 void pa_sink_enter_passthrough(pa_sink *s) {
1528 pa_cvolume volume;
1529
1530 /* disable the monitor in passthrough mode */
1531 if (s->monitor_source) {
1532 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s->monitor_source->name);
1533 pa_source_suspend(s->monitor_source, TRUE, PA_SUSPEND_PASSTHROUGH);
1534 }
1535
1536 /* set the volume to NORM */
1537 s->saved_volume = *pa_sink_get_volume(s, TRUE);
1538 s->saved_save_volume = s->save_volume;
1539
1540 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1541 pa_sink_set_volume(s, &volume, TRUE, FALSE);
1542 }
1543
1544 /* Called from main context */
1545 void pa_sink_leave_passthrough(pa_sink *s) {
1546 /* Unsuspend monitor */
1547 if (s->monitor_source) {
1548 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s->monitor_source->name);
1549 pa_source_suspend(s->monitor_source, FALSE, PA_SUSPEND_PASSTHROUGH);
1550 }
1551
1552 /* Restore sink volume to what it was before we entered passthrough mode */
1553 pa_sink_set_volume(s, &s->saved_volume, TRUE, s->saved_save_volume);
1554
1555 pa_cvolume_init(&s->saved_volume);
1556 s->saved_save_volume = FALSE;
1557 }
1558
1559 /* Called from main context. */
1560 static void compute_reference_ratio(pa_sink_input *i) {
1561 unsigned c = 0;
1562 pa_cvolume remapped;
1563
1564 pa_assert(i);
1565 pa_assert(pa_sink_flat_volume_enabled(i->sink));
1566
1567 /*
1568 * Calculates the reference ratio from the sink's reference
1569 * volume. This basically calculates:
1570 *
1571 * i->reference_ratio = i->volume / i->sink->reference_volume
1572 */
1573
1574 remapped = i->sink->reference_volume;
1575 pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
1576
1577 i->reference_ratio.channels = i->sample_spec.channels;
1578
1579 for (c = 0; c < i->sample_spec.channels; c++) {
1580
1581 /* We don't update when the sink volume is 0 anyway */
1582 if (remapped.values[c] <= PA_VOLUME_MUTED)
1583 continue;
1584
1585 /* Don't update the reference ratio unless necessary */
1586 if (pa_sw_volume_multiply(
1587 i->reference_ratio.values[c],
1588 remapped.values[c]) == i->volume.values[c])
1589 continue;
1590
1591 i->reference_ratio.values[c] = pa_sw_volume_divide(
1592 i->volume.values[c],
1593 remapped.values[c]);
1594 }
1595 }
1596
1597 /* Called from main context. Only called for the root sink in volume sharing
1598 * cases, except for internal recursive calls. */
1599 static void compute_reference_ratios(pa_sink *s) {
1600 uint32_t idx;
1601 pa_sink_input *i;
1602
1603 pa_sink_assert_ref(s);
1604 pa_assert_ctl_context();
1605 pa_assert(PA_SINK_IS_LINKED(s->state));
1606 pa_assert(pa_sink_flat_volume_enabled(s));
1607
1608 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1609 compute_reference_ratio(i);
1610
1611 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
1612 compute_reference_ratios(i->origin_sink);
1613 }
1614 }
1615
/* Called from main context. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Recomputes each input's real ratio and soft volume from the sink's
 * real volume:
 *
 *     i->real_ratio  := i->volume / s->real_volume
 *     i->soft_volume := i->real_ratio * i->volume_factor
 *
 * Inputs feeding volume-sharing filter sinks are special-cased to a
 * 0 dB real ratio, and the recursion continues into the filter sink. */
static void compute_real_ratios(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin sink uses volume sharing, so this input's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result i->soft_volume must equal i->volume_factor. */
            pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
            i->soft_volume = i->volume_factor;

            compute_real_ratios(i->origin_sink);

            continue;
        }

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        /* Bring the sink's real volume into the input's channel map. */
        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->real_ratio.channels = i->sample_spec.channels;
        i->soft_volume.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave i->real_ratio untouched */
                i->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                        i->real_ratio.values[c],
                        remapped.values[c]) != i->volume.values[c])

                i->real_ratio.values[c] = pa_sw_volume_divide(
                        i->volume.values[c],
                        remapped.values[c]);

            i->soft_volume.values[c] = pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    i->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
1682
1683 static pa_cvolume *cvolume_remap_minimal_impact(
1684 pa_cvolume *v,
1685 const pa_cvolume *template,
1686 const pa_channel_map *from,
1687 const pa_channel_map *to) {
1688
1689 pa_cvolume t;
1690
1691 pa_assert(v);
1692 pa_assert(template);
1693 pa_assert(from);
1694 pa_assert(to);
1695 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1696 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1697
1698 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1699 * mapping from sink input to sink volumes:
1700 *
1701 * If template is a possible remapping from v it is used instead
1702 * of remapping anew.
1703 *
1704 * If the channel maps don't match we set an all-channel volume on
1705 * the sink to ensure that changing a volume on one stream has no
1706 * effect that cannot be compensated for in another stream that
1707 * does not have the same channel map as the sink. */
1708
1709 if (pa_channel_map_equal(from, to))
1710 return v;
1711
1712 t = *template;
1713 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1714 *v = *template;
1715 return v;
1716 }
1717
1718 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1719 return v;
1720 }
1721
1722 /* Called from main thread. Only called for the root sink in volume sharing
1723 * cases, except for internal recursive calls. */
1724 static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1725 pa_sink_input *i;
1726 uint32_t idx;
1727
1728 pa_sink_assert_ref(s);
1729 pa_assert(max_volume);
1730 pa_assert(channel_map);
1731 pa_assert(pa_sink_flat_volume_enabled(s));
1732
1733 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1734 pa_cvolume remapped;
1735
1736 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
1737 get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
1738
1739 /* Ignore this input. The origin sink uses volume sharing, so this
1740 * input's volume will be set to be equal to the root sink's real
1741 * volume. Obviously this input's current volume must not then
1742 * affect what the root sink's real volume will be. */
1743 continue;
1744 }
1745
1746 remapped = i->volume;
1747 cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
1748 pa_cvolume_merge(max_volume, max_volume, &remapped);
1749 }
1750 }
1751
1752 /* Called from main thread. Only called for the root sink in volume sharing
1753 * cases, except for internal recursive calls. */
1754 static pa_bool_t has_inputs(pa_sink *s) {
1755 pa_sink_input *i;
1756 uint32_t idx;
1757
1758 pa_sink_assert_ref(s);
1759
1760 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1761 if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
1762 return TRUE;
1763 }
1764
1765 return FALSE;
1766 }
1767
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls.
 *
 * Sets s->real_volume to *new_volume (remapped from 'channel_map' into
 * the sink's map) and propagates it down into all volume-sharing filter
 * sinks. In flat volume mode the slaved inputs' volumes are forced to
 * follow the root's real volume, with change notifications posted. */
static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);

    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_sink_flat_volume_enabled(s)) {
                pa_cvolume old_volume = i->volume;

                /* Follow the root sink's real volume. */
                i->volume = *new_volume;
                pa_cvolume_remap(&i->volume, channel_map, &i->channel_map);
                compute_reference_ratio(i);

                /* The volume changed, let's tell people so */
                if (!pa_cvolume_equal(&old_volume, &i->volume)) {
                    if (i->volume_changed)
                        i->volume_changed(i);

                    pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
                }
            }

            /* Recurse into the filter sink regardless of flat volume mode. */
            update_real_volume(i->origin_sink, new_volume, channel_map);
        }
    }
}
1804
/* Called from main thread. Only called for the root sink in shared volume
 * cases.
 *
 * Sets s->real_volume to the maximum over all stream volumes in the
 * whole sink tree, then refreshes the inputs' real ratios / soft
 * volumes accordingly. With no inputs at all, the reference volume is
 * adopted unchanged. */
static void compute_real_volume(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (!has_inputs(s)) {
        /* In the special case that we have no sink inputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);
        return;
    }

    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all inputs
     * connected to this sink */
    get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);

    /* Then, let's update the real ratios/soft volumes of all inputs
     * connected to this sink */
    compute_real_ratios(s);
}
1835
/* Called from main thread. Only called for the root sink in shared volume
 * cases, except for internal recursive calls.
 *
 * After the sink's reference volume changed for a reason other than a
 * sink-input volume change, recomputes every input's volume as
 *
 *     i->volume := s->reference_volume * i->reference_ratio
 *
 * and posts change notifications. Volume-sharing filter sinks are
 * recursed into; their own inputs are fixed up later (see
 * update_real_volume() / the comment below). */
static void propagate_reference_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(pa_sink_flat_volume_enabled(s));

    /* This is called whenever the sink volume changes that is not
     * caused by a sink input volume change. We need to fix up the
     * sink input volumes accordingly */

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume old_volume;

        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
            propagate_reference_volume(i->origin_sink);

            /* Since the origin sink uses volume sharing, this input's volume
             * needs to be updated to match the root sink's real volume, but
             * that will be done later in update_shared_real_volume(). */
            continue;
        }

        old_volume = i->volume;

        /* This basically calculates:
         *
         * i->volume := s->reference_volume * i->reference_ratio */

        i->volume = s->reference_volume;
        pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
        pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);

        /* The volume changed, let's tell people so */
        if (!pa_cvolume_equal(&old_volume, &i->volume)) {

            if (i->volume_changed)
                i->volume_changed(i);

            pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
        }
    }
}
1883
/* Called from main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed.
 *
 * Sets s->reference_volume to *v (remapped from 'channel_map'), posts a
 * change event if it differs from the old value, and recurses into
 * volume-sharing filter sinks. 'save' marks the volume for persistence. */
static pa_bool_t update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
    pa_cvolume volume;
    pa_bool_t reference_volume_changed;
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(v);
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));

    volume = *v;
    pa_cvolume_remap(&volume, channel_map, &s->channel_map);

    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    s->reference_volume = volume;

    /* An unchanged volume must not clear an earlier pending save flag. */
    s->save_volume = (!reference_volume_changed && s->save_volume) || save;

    if (reference_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    else if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        /* If the root sink's volume doesn't change, then there can't be any
         * changes in the other sinks in the sink tree either.
         *
         * It's probably theoretically possible that even if the root sink's
         * volume changes slightly, some filter sink doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root sink volume to the sinks connected to the
         * intermediate sink that didn't change its volume. This theoretical
         * possibility is the reason why we have that !(s->flags &
         * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we returned here FALSE always if
         * reference_volume_changed is FALSE. */
        return FALSE;

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
            update_reference_volume(i->origin_sink, v, channel_map, FALSE);
    }

    return TRUE;
}
1931
/* Called from main thread.
 *
 * Sets the sink's volume (or, if 'volume' is NULL, re-syncs the sink's
 * reference/real volumes from the current stream volumes — only valid in
 * flat volume mode). In volume-sharing setups the volume is applied to
 * the root sink and propagated from there. 'send_msg' controls whether
 * the IO thread is notified; 'save' marks the volume for persistence.
 * Refused (with a warning) while a passthrough input is connected,
 * unless the call resets the volume to 0 dB. */
void pa_sink_set_volume(
        pa_sink *s,
        const pa_cvolume *volume,
        pa_bool_t send_msg,
        pa_bool_t save) {

    pa_cvolume new_reference_volume;
    pa_sink *root_sink;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_sink_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
     * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
    if (pa_sink_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
        pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
        return;
    }

    /* In case of volume sharing, the volume is set for the root sink first,
     * from which it's then propagated to the sharing sinks. */
    root_sink = pa_sink_get_master(s);

    if (PA_UNLIKELY(!root_sink))
        return;

    /* As a special exception we accept mono volumes on all sinks --
     * even on those with more complex channel maps */

    if (volume) {
        if (pa_cvolume_compatible(volume, &s->sample_spec))
            new_reference_volume = *volume;
        else {
            /* Mono volume: scale the current reference volume uniformly. */
            new_reference_volume = s->reference_volume;
            pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
        }

        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);

        if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
            if (pa_sink_flat_volume_enabled(root_sink)) {
                /* OK, propagate this volume change back to the inputs */
                propagate_reference_volume(root_sink);

                /* And now recalculate the real volume */
                compute_real_volume(root_sink);
            } else
                update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
        }

    } else {
        /* If volume is NULL we synchronize the sink's real and
         * reference volumes with the stream volumes. */

        pa_assert(pa_sink_flat_volume_enabled(root_sink));

        /* Ok, let's determine the new real volume */
        compute_real_volume(root_sink);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
        /* If the sink and it's root don't have the same number of channels, we need to remap */
        if (s != root_sink && !pa_channel_map_equal(&s->channel_map, &root_sink->channel_map))
            pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
        update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);

        /* Now that the reference volume is updated, we can update the streams'
         * reference ratios. */
        compute_reference_ratios(root_sink);
    }

    if (root_sink->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_sink->soft_volume */

        pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
        if (!(root_sink->flags & PA_SINK_DEFERRED_VOLUME))
            root_sink->set_volume(root_sink);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the real volume */
        root_sink->soft_volume = root_sink->real_volume;

    /* This tells the sink that soft volume and/or real volume changed */
    if (send_msg)
        pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
}
2026
2027 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2028 * Only to be called by sink implementor */
2029 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
2030
2031 pa_sink_assert_ref(s);
2032 pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
2033
2034 if (s->flags & PA_SINK_DEFERRED_VOLUME)
2035 pa_sink_assert_io_context(s);
2036 else
2037 pa_assert_ctl_context();
2038
2039 if (!volume)
2040 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
2041 else
2042 s->soft_volume = *volume;
2043
2044 if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_DEFERRED_VOLUME))
2045 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
2046 else
2047 s->thread_info.soft_volume = s->soft_volume;
2048 }
2049
/* Called from the main thread. Only called for the root sink in volume sharing
 * cases, except for internal recursive calls. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
        /* No externally visible change -- nothing to propagate. */
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))
            return;

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
    }

    if (pa_sink_flat_volume_enabled(s)) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume old_volume = i->volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            i->reference_ratio = i->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            i->volume = s->reference_volume;
            pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &i->volume)) {

                if (i->volume_changed)
                    i->volume_changed(i);

                pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
            }

            /* Recurse into filter sinks that share their volume with us. */
            if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
                propagate_real_volume(i->origin_sink, old_real_volume);
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
        s->save_volume = TRUE;
}
2115
/* Called from io thread */
void pa_sink_update_volume_and_mute(pa_sink *s) {
    pa_assert(s);
    pa_sink_assert_io_context(s);

    /* Asynchronously ask the main thread to re-query volume and mute;
     * the message is handled in pa_sink_process_msg()
     * (PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE), which calls
     * pa_sink_get_volume()/pa_sink_get_mute() with force_refresh. */
    pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
}
2123
/* Called from main thread */
const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Re-read the hardware volume either when the sink asked for periodic
     * refreshes (s->refresh_volume) or when explicitly forced. */
    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_real_volume;

        pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

        old_real_volume = s->real_volume;

        /* Non-deferred sinks query the hardware from here; deferred
         * sinks do it in the IO thread while handling the GET_VOLUME
         * message sent below. */
        if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume)
            s->get_volume(s);

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        /* Fold any externally changed hardware volume back into the
         * reference volume and the stream volumes. */
        update_real_volume(s, &s->real_volume, &s->channel_map);
        propagate_real_volume(s, &old_real_volume);
    }

    return &s->reference_volume;
}
2148
/* Called from main thread. In volume sharing cases, only the root sink may
 * call this. */
void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
    pa_cvolume old_real_volume;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));

    /* The sink implementor may call this if the volume changed to make sure everyone is notified */

    /* Remember the previous value so propagate_real_volume() can detect
     * whether anything actually changed. */
    old_real_volume = s->real_volume;
    update_real_volume(s, new_real_volume, &s->channel_map);
    propagate_real_volume(s, &old_real_volume);
}
2165
2166 /* Called from main thread */
2167 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
2168 pa_bool_t old_muted;
2169
2170 pa_sink_assert_ref(s);
2171 pa_assert_ctl_context();
2172 pa_assert(PA_SINK_IS_LINKED(s->state));
2173
2174 old_muted = s->muted;
2175 s->muted = mute;
2176 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
2177
2178 if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->set_mute)
2179 s->set_mute(s);
2180
2181 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
2182
2183 if (old_muted != s->muted)
2184 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2185 }
2186
/* Called from main thread */
pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Re-read the hardware mute state either when the sink asked for
     * periodic refreshes or when explicitly forced. */
    if (s->refresh_muted || force_refresh) {
        pa_bool_t old_muted = s->muted;

        /* Non-deferred sinks query the hardware from here; deferred sinks
         * do it in the IO thread while handling GET_MUTE below. */
        if (!(s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_mute)
            s->get_mute(s);

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);

        if (old_muted != s->muted) {
            /* The hardware changed behind our back -- almost certainly a
             * user action, so remember the new state. */
            s->save_muted = TRUE;

            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

            /* Make sure the soft mute status stays in sync */
            pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
        }
    }

    return s->muted;
}
2214
2215 /* Called from main thread */
2216 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
2217 pa_sink_assert_ref(s);
2218 pa_assert_ctl_context();
2219 pa_assert(PA_SINK_IS_LINKED(s->state));
2220
2221 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2222
2223 if (s->muted == new_muted)
2224 return;
2225
2226 s->muted = new_muted;
2227 s->save_muted = TRUE;
2228
2229 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2230 }
2231
2232 /* Called from main thread */
2233 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
2234 pa_sink_assert_ref(s);
2235 pa_assert_ctl_context();
2236
2237 if (p)
2238 pa_proplist_update(s->proplist, mode, p);
2239
2240 if (PA_SINK_IS_LINKED(s->state)) {
2241 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2242 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2243 }
2244
2245 return TRUE;
2246 }
2247
2248 /* Called from main thread */
2249 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
2250 void pa_sink_set_description(pa_sink *s, const char *description) {
2251 const char *old;
2252 pa_sink_assert_ref(s);
2253 pa_assert_ctl_context();
2254
2255 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
2256 return;
2257
2258 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2259
2260 if (old && description && pa_streq(old, description))
2261 return;
2262
2263 if (description)
2264 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
2265 else
2266 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
2267
2268 if (s->monitor_source) {
2269 char *n;
2270
2271 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
2272 pa_source_set_description(s->monitor_source, n);
2273 pa_xfree(n);
2274 }
2275
2276 if (PA_SINK_IS_LINKED(s->state)) {
2277 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2278 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
2279 }
2280 }
2281
2282 /* Called from main thread */
2283 unsigned pa_sink_linked_by(pa_sink *s) {
2284 unsigned ret;
2285
2286 pa_sink_assert_ref(s);
2287 pa_assert_ctl_context();
2288 pa_assert(PA_SINK_IS_LINKED(s->state));
2289
2290 ret = pa_idxset_size(s->inputs);
2291
2292 /* We add in the number of streams connected to us here. Please
2293 * note the asymmetry to pa_sink_used_by()! */
2294
2295 if (s->monitor_source)
2296 ret += pa_source_linked_by(s->monitor_source);
2297
2298 return ret;
2299 }
2300
2301 /* Called from main thread */
2302 unsigned pa_sink_used_by(pa_sink *s) {
2303 unsigned ret;
2304
2305 pa_sink_assert_ref(s);
2306 pa_assert_ctl_context();
2307 pa_assert(PA_SINK_IS_LINKED(s->state));
2308
2309 ret = pa_idxset_size(s->inputs);
2310 pa_assert(ret >= s->n_corked);
2311
2312 /* Streams connected to our monitor source do not matter for
2313 * pa_sink_used_by()!.*/
2314
2315 return ret - s->n_corked;
2316 }
2317
2318 /* Called from main thread */
2319 unsigned pa_sink_check_suspend(pa_sink *s) {
2320 unsigned ret;
2321 pa_sink_input *i;
2322 uint32_t idx;
2323
2324 pa_sink_assert_ref(s);
2325 pa_assert_ctl_context();
2326
2327 if (!PA_SINK_IS_LINKED(s->state))
2328 return 0;
2329
2330 ret = 0;
2331
2332 PA_IDXSET_FOREACH(i, s->inputs, idx) {
2333 pa_sink_input_state_t st;
2334
2335 st = pa_sink_input_get_state(i);
2336
2337 /* We do not assert here. It is perfectly valid for a sink input to
2338 * be in the INIT state (i.e. created, marked done but not yet put)
2339 * and we should not care if it's unlinked as it won't contribute
2340 * towards our busy status.
2341 */
2342 if (!PA_SINK_INPUT_IS_LINKED(st))
2343 continue;
2344
2345 if (st == PA_SINK_INPUT_CORKED)
2346 continue;
2347
2348 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
2349 continue;
2350
2351 ret ++;
2352 }
2353
2354 if (s->monitor_source)
2355 ret += pa_source_check_suspend(s->monitor_source);
2356
2357 return ret;
2358 }
2359
2360 /* Called from the IO thread */
2361 static void sync_input_volumes_within_thread(pa_sink *s) {
2362 pa_sink_input *i;
2363 void *state = NULL;
2364
2365 pa_sink_assert_ref(s);
2366 pa_sink_assert_io_context(s);
2367
2368 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2369 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
2370 continue;
2371
2372 i->thread_info.soft_volume = i->soft_volume;
2373 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
2374 }
2375 }
2376
2377 /* Called from the IO thread. Only called for the root sink in volume sharing
2378 * cases, except for internal recursive calls. */
2379 static void set_shared_volume_within_thread(pa_sink *s) {
2380 pa_sink_input *i = NULL;
2381 void *state = NULL;
2382
2383 pa_sink_assert_ref(s);
2384
2385 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2386
2387 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
2388 if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
2389 set_shared_volume_within_thread(i->origin_sink);
2390 }
2391 }
2392
/* Called from IO thread, except when it is not */
/* Generic message dispatcher for sinks: handles stream add/remove/move,
 * volume/mute synchronization, state changes and latency queries. Sink
 * implementations typically chain up to this from their own process_msg().
 * Returns 0 on success, -1 for unknown message codes. */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be fixed up and
             * then configured on the sink. If this causes the sink latency to
             * go down, the sink implementor is responsible for doing a rewind
             * in the update_requested_latency() callback to ensure that the
             * sink buffer doesn't contain more data than what the new latency
             * allows.
             *
             * XXX: Does it really make sense to push this responsibility to
             * the sink implementors? Wouldn't it be better to do it once in
             * the core than many times in the modules? */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming.
             *
             * XXX: Does it really make sense to push this functionality to
             * the sink implementors? Wouldn't it be better to do it once in
             * the core than many times in the modules? */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* The old sink probably has some audio from this
                 * stream in its buffer. We want to "take it back" as
                 * much as possible and play it to the new sink. We
                 * don't know at this point how much the old sink can
                 * rewind. We have to pick something, and that
                 * something is the full latency of the old sink here.
                 * So we rewind the stream buffer by the sink latency
                 * amount, which may be more than what we should
                 * rewind. This can result in a chunk of audio being
                 * played both to the old sink and the new sink.
                 *
                 * FIXME: Fix this code so that we don't have to make
                 * guesses about how much the sink will actually be
                 * able to rewind. If someone comes up with a solution
                 * for this, something to note is that the part of the
                 * latency that the old sink couldn't rewind should
                 * ideally be compensated after the stream has moved
                 * to the new sink by adding silence. The new sink
                 * most likely can't start playing the moved stream
                 * immediately, and that gap should be removed from
                 * the "compensation silence" (at least at the time of
                 * writing this, the move finish code will actually
                 * already take care of dropping the new sink's
                 * unrewindable latency, so taking into account the
                 * unrewindable latency of the old sink is the only
                 * problem).
                 *
                 * The render_memblockq contents are discarded,
                 * because when the sink changes, the format of the
                 * audio stored in the render_memblockq may change
                 * too, making the stored audio invalid. FIXME:
                 * However, the read and write indices are moved back
                 * the same amount, so if they are not the same now,
                 * they won't be the same after the rewind either. If
                 * the write index of the render_memblockq is ahead of
                 * the read index, then the render_memblockq will feed
                 * the new sink some silence first, which it shouldn't
                 * do. The write index should be flushed to be the
                 * same as the read index. */

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* In the ideal case the new sink would start playing
                 * the stream immediately. That requires the sink to
                 * be able to rewind all of its latency, which usually
                 * isn't possible, so there will probably be some gap
                 * before the moved stream becomes audible. We then
                 * have two possibilities: 1) start playing the stream
                 * from where it is now, or 2) drop the unrewindable
                 * latency of the sink from the stream. With option 1
                 * we won't lose any audio but the stream will have a
                 * pause. With option 2 we may lose some audio but the
                 * stream time will be somewhat in sync with the wall
                 * clock. Lennart seems to have chosen option 2 (one
                 * of the reasons might have been that option 1 is
                 * actually much harder to implement), so we drop the
                 * latency of the new sink from the moved stream and
                 * hope that the sink will undo most of that in the
                 * rewind. */

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* Updating the requested sink latency has to be done
             * after the sink rewind request, not before, because
             * otherwise the sink may limit the rewind amount
             * needlessly. */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
            /* Apply the shared volume starting from the root of the
             * volume-sharing hierarchy. */
            pa_sink *root_sink = pa_sink_get_master(s);

            if (PA_LIKELY(root_sink))
                set_shared_volume_within_thread(root_sink);

            return 0;
        }

        case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:

            if (s->flags & PA_SINK_DEFERRED_VOLUME) {
                s->set_volume(s);
                pa_sink_volume_change_push(s);
            }
            /* Fall through ... */

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:

            if ((s->flags & PA_SINK_DEFERRED_VOLUME) && s->get_volume) {
                s->get_volume(s);
                pa_sink_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
            }

            /* In case sink implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (s->flags & PA_SINK_DEFERRED_VOLUME && s->set_mute)
                s->set_mute(s);

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:

            if (s->flags & PA_SINK_DEFERRED_VOLUME && s->get_mute)
                s->get_mute(s);

            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* Track whether this transition crosses the suspended
             * boundary in either direction. */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            /* A suspended sink has no pending rewinds. */
            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            /* userdata is a two-element array: { min, max }. */
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_PORT:

            pa_assert(userdata);
            if (s->set_port) {
                struct sink_message_set_port *msg_data = userdata;
                msg_data->ret = s->set_port(s, msg_data->port);
            }
            return 0;

        case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            /* Make sure we're not messing with main thread when no longer linked */
            if (!PA_SINK_IS_LINKED(s->state))
                return 0;

            pa_sink_get_volume(s, TRUE);
            pa_sink_get_mute(s, TRUE);
            return 0;

        case PA_SINK_MESSAGE_SET_LATENCY_OFFSET:
            s->thread_info.latency_offset = offset;
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            ;
    }

    /* Unknown or unhandled message code. */
    return -1;
}
2851
2852 /* Called from main thread */
2853 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2854 pa_sink *sink;
2855 uint32_t idx;
2856 int ret = 0;
2857
2858 pa_core_assert_ref(c);
2859 pa_assert_ctl_context();
2860 pa_assert(cause != 0);
2861
2862 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2863 int r;
2864
2865 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2866 ret = r;
2867 }
2868
2869 return ret;
2870 }
2871
/* Called from main thread */
void pa_sink_detach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Synchronously ask the IO thread to detach all streams (handled via
     * pa_sink_detach_within_thread() in pa_sink_process_msg()). */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
2880
/* Called from main thread */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Synchronously ask the IO thread to reattach all streams (handled via
     * pa_sink_attach_within_thread() in pa_sink_process_msg()). */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2889
2890 /* Called from IO thread */
2891 void pa_sink_detach_within_thread(pa_sink *s) {
2892 pa_sink_input *i;
2893 void *state = NULL;
2894
2895 pa_sink_assert_ref(s);
2896 pa_sink_assert_io_context(s);
2897 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2898
2899 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2900 if (i->detach)
2901 i->detach(i);
2902
2903 if (s->monitor_source)
2904 pa_source_detach_within_thread(s->monitor_source);
2905 }
2906
2907 /* Called from IO thread */
2908 void pa_sink_attach_within_thread(pa_sink *s) {
2909 pa_sink_input *i;
2910 void *state = NULL;
2911
2912 pa_sink_assert_ref(s);
2913 pa_sink_assert_io_context(s);
2914 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2915
2916 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2917 if (i->attach)
2918 i->attach(i);
2919
2920 if (s->monitor_source)
2921 pa_source_attach_within_thread(s->monitor_source);
2922 }
2923
2924 /* Called from IO thread */
2925 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2926 pa_sink_assert_ref(s);
2927 pa_sink_assert_io_context(s);
2928 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2929
2930 if (nbytes == (size_t) -1)
2931 nbytes = s->thread_info.max_rewind;
2932
2933 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2934
2935 if (s->thread_info.rewind_requested &&
2936 nbytes <= s->thread_info.rewind_nbytes)
2937 return;
2938
2939 s->thread_info.rewind_nbytes = nbytes;
2940 s->thread_info.rewind_requested = TRUE;
2941
2942 if (s->request_rewind)
2943 s->request_rewind(s);
2944 }
2945
/* Called from IO thread */
/* Computes the effective requested latency: the smallest latency requested
 * by any connected input or the monitor source, clamped to the sink's
 * latency range, or (pa_usec_t) -1 if nothing requested anything. The
 * result is cached in thread_info once the sink is linked. */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Fixed-latency sinks simply report their (clamped) fixed latency. */
    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    /* Use the cached value when it hasn't been invalidated. */
    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Take the minimum over all inputs that expressed a preference ... */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* ... and over whatever the monitor source requests. */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
2984
/* Called from main thread */
/* Main-thread wrapper that queries the IO thread synchronously for the
 * currently requested latency. Returns 0 for a suspended sink. */
pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Suspended sinks report no latency requirement. */
    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);

    return usec;
}
3000
3001 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3002 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
3003 pa_sink_input *i;
3004 void *state = NULL;
3005
3006 pa_sink_assert_ref(s);
3007 pa_sink_assert_io_context(s);
3008
3009 if (max_rewind == s->thread_info.max_rewind)
3010 return;
3011
3012 s->thread_info.max_rewind = max_rewind;
3013
3014 if (PA_SINK_IS_LINKED(s->thread_info.state))
3015 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3016 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
3017
3018 if (s->monitor_source)
3019 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
3020 }
3021
3022 /* Called from main thread */
3023 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
3024 pa_sink_assert_ref(s);
3025 pa_assert_ctl_context();
3026
3027 if (PA_SINK_IS_LINKED(s->state))
3028 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
3029 else
3030 pa_sink_set_max_rewind_within_thread(s, max_rewind);
3031 }
3032
3033 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3034 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
3035 void *state = NULL;
3036
3037 pa_sink_assert_ref(s);
3038 pa_sink_assert_io_context(s);
3039
3040 if (max_request == s->thread_info.max_request)
3041 return;
3042
3043 s->thread_info.max_request = max_request;
3044
3045 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
3046 pa_sink_input *i;
3047
3048 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
3049 pa_sink_input_update_max_request(i, s->thread_info.max_request);
3050 }
3051 }
3052
3053 /* Called from main thread */
3054 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
3055 pa_sink_assert_ref(s);
3056 pa_assert_ctl_context();
3057
3058 if (PA_SINK_IS_LINKED(s->state))
3059 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
3060 else
3061 pa_sink_set_max_request_within_thread(s, max_request);
3062 }
3063
/* Called from IO thread */
/* Drops the cached requested-latency value (only dynamic-latency sinks keep
 * one) and notifies the sink implementation and all inputs that the
 * requested latency may have changed. If 'dynamic' is TRUE the call concerns
 * dynamic latency only, so it is a no-op for fixed-latency sinks. */
void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = FALSE;
    else if (dynamic)
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        /* Let the implementation react first... */
        if (s->update_requested_latency)
            s->update_requested_latency(s);

        /* ...then every input that cares. */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
3087
3088 /* Called from main thread */
3089 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
3090 pa_sink_assert_ref(s);
3091 pa_assert_ctl_context();
3092
3093 /* min_latency == 0: no limit
3094 * min_latency anything else: specified limit
3095 *
3096 * Similar for max_latency */
3097
3098 if (min_latency < ABSOLUTE_MIN_LATENCY)
3099 min_latency = ABSOLUTE_MIN_LATENCY;
3100
3101 if (max_latency <= 0 ||
3102 max_latency > ABSOLUTE_MAX_LATENCY)
3103 max_latency = ABSOLUTE_MAX_LATENCY;
3104
3105 pa_assert(min_latency <= max_latency);
3106
3107 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3108 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
3109 max_latency == ABSOLUTE_MAX_LATENCY) ||
3110 (s->flags & PA_SINK_DYNAMIC_LATENCY));
3111
3112 if (PA_SINK_IS_LINKED(s->state)) {
3113 pa_usec_t r[2];
3114
3115 r[0] = min_latency;
3116 r[1] = max_latency;
3117
3118 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
3119 } else
3120 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
3121 }
3122
3123 /* Called from main thread */
3124 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
3125 pa_sink_assert_ref(s);
3126 pa_assert_ctl_context();
3127 pa_assert(min_latency);
3128 pa_assert(max_latency);
3129
3130 if (PA_SINK_IS_LINKED(s->state)) {
3131 pa_usec_t r[2] = { 0, 0 };
3132
3133 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
3134
3135 *min_latency = r[0];
3136 *max_latency = r[1];
3137 } else {
3138 *min_latency = s->thread_info.min_latency;
3139 *max_latency = s->thread_info.max_latency;
3140 }
3141 }
3142
/* Called from IO thread */
/* IO-thread side of pa_sink_set_latency_range(): stores the new limits,
 * notifies interested inputs, drops the cached requested latency and
 * forwards the range to the monitor source. */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    /* Nothing to do when the range is unchanged. */
    if (s->thread_info.min_latency == min_latency &&
        s->thread_info.max_latency == max_latency)
        return;

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    /* The cached requested latency may now lie outside the new range. */
    pa_sink_invalidate_requested_latency(s, FALSE);

    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
3177
3178 /* Called from main thread */
3179 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
3180 pa_sink_assert_ref(s);
3181 pa_assert_ctl_context();
3182
3183 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
3184 pa_assert(latency == 0);
3185 return;
3186 }
3187
3188 if (latency < ABSOLUTE_MIN_LATENCY)
3189 latency = ABSOLUTE_MIN_LATENCY;
3190
3191 if (latency > ABSOLUTE_MAX_LATENCY)
3192 latency = ABSOLUTE_MAX_LATENCY;
3193
3194 if (PA_SINK_IS_LINKED(s->state))
3195 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
3196 else
3197 s->thread_info.fixed_latency = latency;
3198
3199 pa_source_set_fixed_latency(s->monitor_source, latency);
3200 }
3201
3202 /* Called from main thread */
3203 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
3204 pa_usec_t latency;
3205
3206 pa_sink_assert_ref(s);
3207 pa_assert_ctl_context();
3208
3209 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
3210 return 0;
3211
3212 if (PA_SINK_IS_LINKED(s->state))
3213 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
3214 else
3215 latency = s->thread_info.fixed_latency;
3216
3217 return latency;
3218 }
3219
/* Called from IO thread */
/* IO-thread side of pa_sink_set_fixed_latency(): stores the new value,
 * notifies interested inputs, drops the cached requested latency and
 * forwards the value to the monitor source. */
void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Dynamic-latency sinks must not carry a fixed latency. */
    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(latency <= ABSOLUTE_MAX_LATENCY);

    /* Nothing to do when the value is unchanged. */
    if (s->thread_info.fixed_latency == latency)
        return;

    s->thread_info.fixed_latency = latency;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_fixed_latency)
                i->update_sink_fixed_latency(i);
    }

    /* The cached requested latency may be stale now. */
    pa_sink_invalidate_requested_latency(s, FALSE);

    pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
}
3251
3252 /* Called from main context */
3253 void pa_sink_set_latency_offset(pa_sink *s, int64_t offset) {
3254 pa_sink_assert_ref(s);
3255
3256 s->latency_offset = offset;
3257
3258 if (PA_SINK_IS_LINKED(s->state))
3259 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_OFFSET, NULL, offset, NULL) == 0);
3260 else
3261 s->thread_info.latency_offset = offset;
3262 }
3263
3264 /* Called from main context */
3265 size_t pa_sink_get_max_rewind(pa_sink *s) {
3266 size_t r;
3267 pa_assert_ctl_context();
3268 pa_sink_assert_ref(s);
3269
3270 if (!PA_SINK_IS_LINKED(s->state))
3271 return s->thread_info.max_rewind;
3272
3273 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
3274
3275 return r;
3276 }
3277
3278 /* Called from main context */
3279 size_t pa_sink_get_max_request(pa_sink *s) {
3280 size_t r;
3281 pa_sink_assert_ref(s);
3282 pa_assert_ctl_context();
3283
3284 if (!PA_SINK_IS_LINKED(s->state))
3285 return s->thread_info.max_request;
3286
3287 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
3288
3289 return r;
3290 }
3291
/* Called from main context */
/* Switches the sink to the port named 'name'. Returns 0 on success or a
 * negative PA_ERR_* code. 'save' marks the choice to be remembered. */
int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
    pa_device_port *port;
    int ret;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (!s->set_port) {
        pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
        return -PA_ERR_NOTIMPLEMENTED;
    }

    if (!name)
        return -PA_ERR_NOENTITY;

    if (!(port = pa_hashmap_get(s->ports, name)))
        return -PA_ERR_NOENTITY;

    /* Port already active: only merge the save flag. */
    if (s->active_port == port) {
        s->save_port = s->save_port || save;
        return 0;
    }

    /* With deferred volume the switch is routed through the IO thread's
     * message queue; otherwise call the implementation directly. */
    if (s->flags & PA_SINK_DEFERRED_VOLUME) {
        struct sink_message_set_port msg = { .port = port, .ret = 0 };
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
        ret = msg.ret;
    }
    else
        ret = s->set_port(s, port);

    if (ret < 0)
        return -PA_ERR_NOENTITY;

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

    pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);

    s->active_port = port;
    s->save_port = save;

    /* The new port may carry a different latency offset. */
    pa_sink_set_latency_offset(s, s->active_port->latency_offset);

    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);

    return 0;
}
3340
3341 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
3342 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
3343
3344 pa_assert(p);
3345
3346 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
3347 return TRUE;
3348
3349 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3350
3351 if (pa_streq(ff, "microphone"))
3352 t = "audio-input-microphone";
3353 else if (pa_streq(ff, "webcam"))
3354 t = "camera-web";
3355 else if (pa_streq(ff, "computer"))
3356 t = "computer";
3357 else if (pa_streq(ff, "handset"))
3358 t = "phone";
3359 else if (pa_streq(ff, "portable"))
3360 t = "multimedia-player";
3361 else if (pa_streq(ff, "tv"))
3362 t = "video-display";
3363
3364 /*
3365 * The following icons are not part of the icon naming spec,
3366 * because Rodney Dawes sucks as the maintainer of that spec.
3367 *
3368 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3369 */
3370 else if (pa_streq(ff, "headset"))
3371 t = "audio-headset";
3372 else if (pa_streq(ff, "headphone"))
3373 t = "audio-headphones";
3374 else if (pa_streq(ff, "speaker"))
3375 t = "audio-speakers";
3376 else if (pa_streq(ff, "hands-free"))
3377 t = "audio-handsfree";
3378 }
3379
3380 if (!t)
3381 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3382 if (pa_streq(c, "modem"))
3383 t = "modem";
3384
3385 if (!t) {
3386 if (is_sink)
3387 t = "audio-card";
3388 else
3389 t = "audio-input-microphone";
3390 }
3391
3392 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3393 if (strstr(profile, "analog"))
3394 s = "-analog";
3395 else if (strstr(profile, "iec958"))
3396 s = "-iec958";
3397 else if (strstr(profile, "hdmi"))
3398 s = "-hdmi";
3399 }
3400
3401 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
3402
3403 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
3404
3405 return TRUE;
3406 }
3407
3408 pa_bool_t pa_device_init_description(pa_proplist *p) {
3409 const char *s, *d = NULL, *k;
3410 pa_assert(p);
3411
3412 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
3413 return TRUE;
3414
3415 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3416 if (pa_streq(s, "internal"))
3417 d = _("Built-in Audio");
3418
3419 if (!d)
3420 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
3421 if (pa_streq(s, "modem"))
3422 d = _("Modem");
3423
3424 if (!d)
3425 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
3426
3427 if (!d)
3428 return FALSE;
3429
3430 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
3431
3432 if (d && k)
3433 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, "%s %s", d, k);
3434 else if (d)
3435 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
3436
3437 return TRUE;
3438 }
3439
3440 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
3441 const char *s;
3442 pa_assert(p);
3443
3444 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
3445 return TRUE;
3446
3447 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
3448 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
3449 || pa_streq(s, "headset")) {
3450 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
3451 return TRUE;
3452 }
3453
3454 return FALSE;
3455 }
3456
3457 unsigned pa_device_init_priority(pa_proplist *p) {
3458 const char *s;
3459 unsigned priority = 0;
3460
3461 pa_assert(p);
3462
3463 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
3464
3465 if (pa_streq(s, "sound"))
3466 priority += 9000;
3467 else if (!pa_streq(s, "modem"))
3468 priority += 1000;
3469 }
3470
3471 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
3472
3473 if (pa_streq(s, "internal"))
3474 priority += 900;
3475 else if (pa_streq(s, "speaker"))
3476 priority += 500;
3477 else if (pa_streq(s, "headphone"))
3478 priority += 400;
3479 }
3480
3481 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
3482
3483 if (pa_streq(s, "pci"))
3484 priority += 50;
3485 else if (pa_streq(s, "usb"))
3486 priority += 40;
3487 else if (pa_streq(s, "bluetooth"))
3488 priority += 30;
3489 }
3490
3491 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
3492
3493 if (pa_startswith(s, "analog-"))
3494 priority += 9;
3495 else if (pa_startswith(s, "iec958-"))
3496 priority += 8;
3497 }
3498
3499 return priority;
3500 }
3501
/* Free list used to recycle pa_sink_volume_change entries and avoid
 * allocator traffic in the IO thread (see *_new()/*_free() below). */
PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3503
3504 /* Called from the IO thread. */
3505 static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
3506 pa_sink_volume_change *c;
3507 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
3508 c = pa_xnew(pa_sink_volume_change, 1);
3509
3510 PA_LLIST_INIT(pa_sink_volume_change, c);
3511 c->at = 0;
3512 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
3513 return c;
3514 }
3515
3516 /* Called from the IO thread. */
3517 static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
3518 pa_assert(c);
3519 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
3520 pa_xfree(c);
3521 }
3522
3523 /* Called from the IO thread. */
3524 void pa_sink_volume_change_push(pa_sink *s) {
3525 pa_sink_volume_change *c = NULL;
3526 pa_sink_volume_change *nc = NULL;
3527 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
3528
3529 const char *direction = NULL;
3530
3531 pa_assert(s);
3532 nc = pa_sink_volume_change_new(s);
3533
3534 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3535 * Adding one more volume for HW would get us rid of this, but I am trying
3536 * to survive with the ones we already have. */
3537 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
3538
3539 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
3540 pa_log_debug("Volume not changing");
3541 pa_sink_volume_change_free(nc);
3542 return;
3543 }
3544
3545 nc->at = pa_sink_get_latency_within_thread(s);
3546 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
3547
3548 if (s->thread_info.volume_changes_tail) {
3549 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
3550 /* If volume is going up let's do it a bit late. If it is going
3551 * down let's do it a bit early. */
3552 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
3553 if (nc->at + safety_margin > c->at) {
3554 nc->at += safety_margin;
3555 direction = "up";
3556 break;
3557 }
3558 }
3559 else if (nc->at - safety_margin > c->at) {
3560 nc->at -= safety_margin;
3561 direction = "down";
3562 break;
3563 }
3564 }
3565 }
3566
3567 if (c == NULL) {
3568 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
3569 nc->at += safety_margin;
3570 direction = "up";
3571 } else {
3572 nc->at -= safety_margin;
3573 direction = "down";
3574 }
3575 PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
3576 }
3577 else {
3578 PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
3579 }
3580
3581 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
3582
3583 /* We can ignore volume events that came earlier but should happen later than this. */
3584 PA_LLIST_FOREACH(c, nc->next) {
3585 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
3586 pa_sink_volume_change_free(c);
3587 }
3588 nc->next = NULL;
3589 s->thread_info.volume_changes_tail = nc;
3590 }
3591
3592 /* Called from the IO thread. */
3593 static void pa_sink_volume_change_flush(pa_sink *s) {
3594 pa_sink_volume_change *c = s->thread_info.volume_changes;
3595 pa_assert(s);
3596 s->thread_info.volume_changes = NULL;
3597 s->thread_info.volume_changes_tail = NULL;
3598 while (c) {
3599 pa_sink_volume_change *next = c->next;
3600 pa_sink_volume_change_free(c);
3601 c = next;
3602 }
3603 }
3604
/* Called from the IO thread. */
/* Applies every queued hardware volume change whose due time has passed and
 * writes the last of them to the hardware. Returns TRUE when the HW volume
 * was updated. If usec_to_next is non-NULL it receives the delay until the
 * next pending change, or 0 when nothing is pending. */
pa_bool_t pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
    pa_usec_t now;
    pa_bool_t ret = FALSE;

    pa_assert(s);

    if (!s->thread_info.volume_changes || !PA_SINK_IS_LINKED(s->state)) {
        if (usec_to_next)
            *usec_to_next = 0;
        return ret;
    }

    /* Deferred volume needs the implementation's write_volume() callback. */
    pa_assert(s->write_volume);

    now = pa_rtclock_now();

    /* Pop all changes that are already due; only the last one popped ends
     * up as current_hw_volume and gets written below. */
    while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
        pa_sink_volume_change *c = s->thread_info.volume_changes;
        PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
        pa_log_debug("Volume change to %d at %llu was written %llu usec late",
                     pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
        ret = TRUE;
        s->thread_info.current_hw_volume = c->hw_volume;
        pa_sink_volume_change_free(c);
    }

    if (ret)
        s->write_volume(s);

    if (s->thread_info.volume_changes) {
        if (usec_to_next)
            *usec_to_next = s->thread_info.volume_changes->at - now;
        if (pa_log_ratelimit(PA_LOG_DEBUG))
            pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
    }
    else {
        if (usec_to_next)
            *usec_to_next = 0;
        /* Queue drained: reset the tail pointer too. */
        s->thread_info.volume_changes_tail = NULL;
    }
    return ret;
}
3648
/* Called from the IO thread. */
static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
    /* All the queued volume events later than current latency are shifted to happen earlier. */
    pa_sink_volume_change *c;
    pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
    pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
    pa_usec_t limit = pa_sink_get_latency_within_thread(s);

    pa_log_debug("latency = %lld", (long long) limit);
    limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
        pa_usec_t modified_limit = limit;
        /* Apply the same down-early/up-late safety margin that was used
         * when the change was queued. */
        if (prev_vol > pa_cvolume_avg(&c->hw_volume))
            modified_limit -= s->thread_info.volume_change_safety_margin;
        else
            modified_limit += s->thread_info.volume_change_safety_margin;
        /* Shift events scheduled past the limit back by the rewound amount,
         * but never to before the limit itself. */
        if (c->at > modified_limit) {
            c->at -= rewound;
            if (c->at < modified_limit)
                c->at = modified_limit;
        }
        prev_vol = pa_cvolume_avg(&c->hw_volume);
    }
    /* Some events may have become due right away. */
    pa_sink_volume_change_apply(s, NULL);
}
3675
3676 /* Called from the main thread */
3677 /* Gets the list of formats supported by the sink. The members and idxset must
3678 * be freed by the caller. */
3679 pa_idxset* pa_sink_get_formats(pa_sink *s) {
3680 pa_idxset *ret;
3681
3682 pa_assert(s);
3683
3684 if (s->get_formats) {
3685 /* Sink supports format query, all is good */
3686 ret = s->get_formats(s);
3687 } else {
3688 /* Sink doesn't support format query, so assume it does PCM */
3689 pa_format_info *f = pa_format_info_new();
3690 f->encoding = PA_ENCODING_PCM;
3691
3692 ret = pa_idxset_new(NULL, NULL);
3693 pa_idxset_put(ret, f, NULL);
3694 }
3695
3696 return ret;
3697 }
3698
3699 /* Called from the main thread */
3700 /* Allows an external source to set what formats a sink supports if the sink
3701 * permits this. The function makes a copy of the formats on success. */
3702 pa_bool_t pa_sink_set_formats(pa_sink *s, pa_idxset *formats) {
3703 pa_assert(s);
3704 pa_assert(formats);
3705
3706 if (s->set_formats)
3707 /* Sink supports setting formats -- let's give it a shot */
3708 return s->set_formats(s, formats);
3709 else
3710 /* Sink doesn't support setting this -- bail out */
3711 return FALSE;
3712 }
3713
3714 /* Called from the main thread */
3715 /* Checks if the sink can accept this format */
3716 pa_bool_t pa_sink_check_format(pa_sink *s, pa_format_info *f)
3717 {
3718 pa_idxset *formats = NULL;
3719 pa_bool_t ret = FALSE;
3720
3721 pa_assert(s);
3722 pa_assert(f);
3723
3724 formats = pa_sink_get_formats(s);
3725
3726 if (formats) {
3727 pa_format_info *finfo_device;
3728 uint32_t i;
3729
3730 PA_IDXSET_FOREACH(finfo_device, formats, i) {
3731 if (pa_format_info_is_compatible(finfo_device, f)) {
3732 ret = TRUE;
3733 break;
3734 }
3735 }
3736
3737 pa_idxset_free(formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
3738 }
3739
3740 return ret;
3741 }
3742
3743 /* Called from the main thread */
3744 /* Calculates the intersection between formats supported by the sink and
3745 * in_formats, and returns these, in the order of the sink's formats. */
3746 pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
3747 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
3748 pa_format_info *f_sink, *f_in;
3749 uint32_t i, j;
3750
3751 pa_assert(s);
3752
3753 if (!in_formats || pa_idxset_isempty(in_formats))
3754 goto done;
3755
3756 sink_formats = pa_sink_get_formats(s);
3757
3758 PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
3759 PA_IDXSET_FOREACH(f_in, in_formats, j) {
3760 if (pa_format_info_is_compatible(f_sink, f_in))
3761 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
3762 }
3763 }
3764
3765 done:
3766 if (sink_formats)
3767 pa_idxset_free(sink_formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
3768
3769 return out_formats;
3770 }