2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
31 #include <pulse/introspect.h>
32 #include <pulse/format.h>
33 #include <pulse/utf8.h>
34 #include <pulse/xmalloc.h>
35 #include <pulse/timeval.h>
36 #include <pulse/util.h>
37 #include <pulse/rtclock.h>
38 #include <pulse/internal.h>
40 #include <pulsecore/i18n.h>
41 #include <pulsecore/sink-input.h>
42 #include <pulsecore/namereg.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/mix.h>
46 #include <pulsecore/core-subscribe.h>
47 #include <pulsecore/log.h>
48 #include <pulsecore/macro.h>
49 #include <pulsecore/play-memblockq.h>
50 #include <pulsecore/flist.h>
/* Upper bound on the number of sink inputs mixed in one pa_sink_render() pass
 * (passed as maxinfo to fill_mix_info()). */
54 #define MAX_MIX_CHANNELS 32
/* Size of the intermediate mix buffer; frame-aligned before use in pa_sink_render(). */
55 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
/* Hard bounds applied to thread_info.min_latency/max_latency.
 * NOTE(review): presumably in microseconds (cf. PA_USEC_PER_SEC below) — confirm. */
56 #define ABSOLUTE_MIN_LATENCY (500)
57 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
/* Latency assumed for sinks without PA_SINK_DYNAMIC_LATENCY (see pa_sink_new()). */
58 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
/* Declare pa_sink as a public class whose parent is pa_msgobject. */
60 PA_DEFINE_PUBLIC_CLASS(pa_sink
, pa_msgobject
);
/* Node in the sink's linked list of pending (deferred) volume changes
 * (see thread_info.volume_changes / pa_sink_volume_change_* below).
 * NOTE(review): the data members are elided in this extract (original
 * lines 63-65 missing); only the linked-list fields are visible. */
62 struct pa_sink_volume_change
{
66 PA_LLIST_FIELDS(pa_sink_volume_change
);
/* Payload for PA_SINK_MESSAGE_SET_PORT; initialized elsewhere in this file as
 * { .port = ..., .ret = 0 } (see pa_sink_suspend()).
 * NOTE(review): field declarations are elided in this extract. */
69 struct sink_message_set_port
{
74 static void sink_free(pa_object
*s
);
76 static void pa_sink_volume_change_push(pa_sink
*s
);
77 static void pa_sink_volume_change_flush(pa_sink
*s
);
78 static void pa_sink_volume_change_rewind(pa_sink
*s
, size_t nbytes
);
/* Initialize a pa_sink_new_data structure: allocates a fresh property list
 * and a string-keyed hashmap for ports.
 * NOTE(review): original lines 81-83 and 86-89 (likely asserts/zeroing and
 * the return of 'data', plus the closing brace) are elided in this extract. */
80 pa_sink_new_data
* pa_sink_new_data_init(pa_sink_new_data
*data
) {
84 data
->proplist
= pa_proplist_new();
85 data
->ports
= pa_hashmap_new(pa_idxset_string_hash_func
, pa_idxset_string_compare_func
);
/* Store a private copy of 'name' in the new-data struct.
 * NOTE(review): original lines 91-93 and 95-96 are elided (asserts and any
 * release of a previously set name, plus the closing brace). */
90 void pa_sink_new_data_set_name(pa_sink_new_data
*data
, const char *name
) {
94 data
->name
= pa_xstrdup(name
);
/* Copy *spec into the new-data struct. sample_spec_is_set records whether a
 * spec was supplied; passing NULL clears the flag and copies nothing. */
97 void pa_sink_new_data_set_sample_spec(pa_sink_new_data
*data
, const pa_sample_spec
*spec
) {
100 if ((data
->sample_spec_is_set
= !!spec
))
101 data
->sample_spec
= *spec
;
/* Copy *map into the new-data struct. channel_map_is_set records whether a
 * map was supplied; passing NULL clears the flag and copies nothing. */
104 void pa_sink_new_data_set_channel_map(pa_sink_new_data
*data
, const pa_channel_map
*map
) {
107 if ((data
->channel_map_is_set
= !!map
))
108 data
->channel_map
= *map
;
/* Record an alternate sample rate and mark it as explicitly set (the _is_set
 * flag lets pa_sink_new() fall back to the core default otherwise). */
111 void pa_sink_new_data_set_alternate_sample_rate(pa_sink_new_data
*data
, const uint32_t alternate_sample_rate
) {
114 data
->alternate_sample_rate_is_set
= TRUE
;
115 data
->alternate_sample_rate
= alternate_sample_rate
;
/* Copy *volume into the new-data struct. volume_is_set records whether a
 * volume was supplied; passing NULL clears the flag and copies nothing. */
118 void pa_sink_new_data_set_volume(pa_sink_new_data
*data
, const pa_cvolume
*volume
) {
121 if ((data
->volume_is_set
= !!volume
))
122 data
->volume
= *volume
;
/* Record the initial mute state, normalized to 0/1 via !!, and mark it as
 * explicitly set. */
125 void pa_sink_new_data_set_muted(pa_sink_new_data
*data
, pa_bool_t mute
) {
128 data
->muted_is_set
= TRUE
;
129 data
->muted
= !!mute
;
/* Replace the requested active port: frees any previously stored name and
 * stores a private copy of 'port'. */
132 void pa_sink_new_data_set_port(pa_sink_new_data
*data
, const char *port
) {
135 pa_xfree(data
->active_port
);
136 data
->active_port
= pa_xstrdup(port
);
/* Release everything owned by a pa_sink_new_data: the proplist, the ports
 * hashmap (unreferencing each pa_device_port), and the name/active_port strings.
 * NOTE(review): original lines 140-141, 143-144 and 146 are elided — likely an
 * assert and a NULL guard around the hashmap free; the visible calls may be
 * conditional in the original. */
139 void pa_sink_new_data_done(pa_sink_new_data
*data
) {
142 pa_proplist_free(data
->proplist
);
145 pa_hashmap_free(data
->ports
, (pa_free_cb_t
) pa_device_port_unref
);
147 pa_xfree(data
->name
);
148 pa_xfree(data
->active_port
);
152 /* Called from main context */
/* Clear all implementor-supplied callback pointers on the sink, returning it
 * to a state where no backend hooks are installed.
 * NOTE(review): original lines 154-156, 160-161, 164 and 168-169 are elided —
 * additional callbacks (e.g. set_state/get_mute/set_mute/set_port, judging by
 * the setters below) are presumably reset there too; confirm against the
 * original file. */
153 static void reset_callbacks(pa_sink
*s
) {
157 s
->get_volume
= NULL
;
158 s
->set_volume
= NULL
;
159 s
->write_volume
= NULL
;
162 s
->request_rewind
= NULL
;
163 s
->update_requested_latency
= NULL
;
165 s
->get_formats
= NULL
;
166 s
->set_formats
= NULL
;
167 s
->update_rate
= NULL
;
170 /* Called from main context */
171 pa_sink
* pa_sink_new(
173 pa_sink_new_data
*data
,
174 pa_sink_flags_t flags
) {
178 char st
[PA_SAMPLE_SPEC_SNPRINT_MAX
], cm
[PA_CHANNEL_MAP_SNPRINT_MAX
];
179 pa_source_new_data source_data
;
185 pa_assert(data
->name
);
186 pa_assert_ctl_context();
188 s
= pa_msgobject_new(pa_sink
);
190 if (!(name
= pa_namereg_register(core
, data
->name
, PA_NAMEREG_SINK
, s
, data
->namereg_fail
))) {
191 pa_log_debug("Failed to register name %s.", data
->name
);
196 pa_sink_new_data_set_name(data
, name
);
198 if (pa_hook_fire(&core
->hooks
[PA_CORE_HOOK_SINK_NEW
], data
) < 0) {
200 pa_namereg_unregister(core
, name
);
204 /* FIXME, need to free s here on failure */
206 pa_return_null_if_fail(!data
->driver
|| pa_utf8_valid(data
->driver
));
207 pa_return_null_if_fail(data
->name
&& pa_utf8_valid(data
->name
) && data
->name
[0]);
209 pa_return_null_if_fail(data
->sample_spec_is_set
&& pa_sample_spec_valid(&data
->sample_spec
));
211 if (!data
->channel_map_is_set
)
212 pa_return_null_if_fail(pa_channel_map_init_auto(&data
->channel_map
, data
->sample_spec
.channels
, PA_CHANNEL_MAP_DEFAULT
));
214 pa_return_null_if_fail(pa_channel_map_valid(&data
->channel_map
));
215 pa_return_null_if_fail(data
->channel_map
.channels
== data
->sample_spec
.channels
);
217 /* FIXME: There should probably be a general function for checking whether
218 * the sink volume is allowed to be set, like there is for sink inputs. */
219 pa_assert(!data
->volume_is_set
|| !(flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
));
221 if (!data
->volume_is_set
) {
222 pa_cvolume_reset(&data
->volume
, data
->sample_spec
.channels
);
223 data
->save_volume
= FALSE
;
226 pa_return_null_if_fail(pa_cvolume_valid(&data
->volume
));
227 pa_return_null_if_fail(pa_cvolume_compatible(&data
->volume
, &data
->sample_spec
));
229 if (!data
->muted_is_set
)
233 pa_proplist_update(data
->proplist
, PA_UPDATE_MERGE
, data
->card
->proplist
);
235 pa_device_init_description(data
->proplist
);
236 pa_device_init_icon(data
->proplist
, TRUE
);
237 pa_device_init_intended_roles(data
->proplist
);
239 if (pa_hook_fire(&core
->hooks
[PA_CORE_HOOK_SINK_FIXATE
], data
) < 0) {
241 pa_namereg_unregister(core
, name
);
245 s
->parent
.parent
.free
= sink_free
;
246 s
->parent
.process_msg
= pa_sink_process_msg
;
249 s
->state
= PA_SINK_INIT
;
252 s
->suspend_cause
= data
->suspend_cause
;
253 pa_sink_set_mixer_dirty(s
, FALSE
);
254 s
->name
= pa_xstrdup(name
);
255 s
->proplist
= pa_proplist_copy(data
->proplist
);
256 s
->driver
= pa_xstrdup(pa_path_get_filename(data
->driver
));
257 s
->module
= data
->module
;
258 s
->card
= data
->card
;
260 s
->priority
= pa_device_init_priority(s
->proplist
);
262 s
->sample_spec
= data
->sample_spec
;
263 s
->channel_map
= data
->channel_map
;
264 s
->default_sample_rate
= s
->sample_spec
.rate
;
266 if (data
->alternate_sample_rate_is_set
)
267 s
->alternate_sample_rate
= data
->alternate_sample_rate
;
269 s
->alternate_sample_rate
= s
->core
->alternate_sample_rate
;
271 if (s
->sample_spec
.rate
== s
->alternate_sample_rate
) {
272 pa_log_warn("Default and alternate sample rates are the same.");
273 s
->alternate_sample_rate
= 0;
276 s
->inputs
= pa_idxset_new(NULL
, NULL
);
278 s
->input_to_master
= NULL
;
280 s
->reference_volume
= s
->real_volume
= data
->volume
;
281 pa_cvolume_reset(&s
->soft_volume
, s
->sample_spec
.channels
);
282 s
->base_volume
= PA_VOLUME_NORM
;
283 s
->n_volume_steps
= PA_VOLUME_NORM
+1;
284 s
->muted
= data
->muted
;
285 s
->refresh_volume
= s
->refresh_muted
= FALSE
;
292 /* As a minor optimization we just steal the list instead of
294 s
->ports
= data
->ports
;
297 s
->active_port
= NULL
;
298 s
->save_port
= FALSE
;
300 if (data
->active_port
)
301 if ((s
->active_port
= pa_hashmap_get(s
->ports
, data
->active_port
)))
302 s
->save_port
= data
->save_port
;
304 if (!s
->active_port
) {
308 PA_HASHMAP_FOREACH(p
, s
->ports
, state
)
309 if (!s
->active_port
|| p
->priority
> s
->active_port
->priority
)
314 s
->latency_offset
= s
->active_port
->latency_offset
;
316 s
->latency_offset
= 0;
318 s
->save_volume
= data
->save_volume
;
319 s
->save_muted
= data
->save_muted
;
321 pa_silence_memchunk_get(
322 &core
->silence_cache
,
328 s
->thread_info
.rtpoll
= NULL
;
329 s
->thread_info
.inputs
= pa_hashmap_new(pa_idxset_trivial_hash_func
, pa_idxset_trivial_compare_func
);
330 s
->thread_info
.soft_volume
= s
->soft_volume
;
331 s
->thread_info
.soft_muted
= s
->muted
;
332 s
->thread_info
.state
= s
->state
;
333 s
->thread_info
.rewind_nbytes
= 0;
334 s
->thread_info
.rewind_requested
= FALSE
;
335 s
->thread_info
.max_rewind
= 0;
336 s
->thread_info
.max_request
= 0;
337 s
->thread_info
.requested_latency_valid
= FALSE
;
338 s
->thread_info
.requested_latency
= 0;
339 s
->thread_info
.min_latency
= ABSOLUTE_MIN_LATENCY
;
340 s
->thread_info
.max_latency
= ABSOLUTE_MAX_LATENCY
;
341 s
->thread_info
.fixed_latency
= flags
& PA_SINK_DYNAMIC_LATENCY
? 0 : DEFAULT_FIXED_LATENCY
;
343 PA_LLIST_HEAD_INIT(pa_sink_volume_change
, s
->thread_info
.volume_changes
);
344 s
->thread_info
.volume_changes_tail
= NULL
;
345 pa_sw_cvolume_multiply(&s
->thread_info
.current_hw_volume
, &s
->soft_volume
, &s
->real_volume
);
346 s
->thread_info
.volume_change_safety_margin
= core
->deferred_volume_safety_margin_usec
;
347 s
->thread_info
.volume_change_extra_delay
= core
->deferred_volume_extra_delay_usec
;
348 s
->thread_info
.latency_offset
= s
->latency_offset
;
350 /* FIXME: This should probably be moved to pa_sink_put() */
351 pa_assert_se(pa_idxset_put(core
->sinks
, s
, &s
->index
) >= 0);
354 pa_assert_se(pa_idxset_put(s
->card
->sinks
, s
, NULL
) >= 0);
356 pt
= pa_proplist_to_string_sep(s
->proplist
, "\n ");
357 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
360 pa_sample_spec_snprint(st
, sizeof(st
), &s
->sample_spec
),
361 pa_channel_map_snprint(cm
, sizeof(cm
), &s
->channel_map
),
365 pa_source_new_data_init(&source_data
);
366 pa_source_new_data_set_sample_spec(&source_data
, &s
->sample_spec
);
367 pa_source_new_data_set_channel_map(&source_data
, &s
->channel_map
);
368 pa_source_new_data_set_alternate_sample_rate(&source_data
, s
->alternate_sample_rate
);
369 source_data
.name
= pa_sprintf_malloc("%s.monitor", name
);
370 source_data
.driver
= data
->driver
;
371 source_data
.module
= data
->module
;
372 source_data
.card
= data
->card
;
374 dn
= pa_proplist_gets(s
->proplist
, PA_PROP_DEVICE_DESCRIPTION
);
375 pa_proplist_setf(source_data
.proplist
, PA_PROP_DEVICE_DESCRIPTION
, "Monitor of %s", dn
? dn
: s
->name
);
376 pa_proplist_sets(source_data
.proplist
, PA_PROP_DEVICE_CLASS
, "monitor");
378 s
->monitor_source
= pa_source_new(core
, &source_data
,
379 ((flags
& PA_SINK_LATENCY
) ? PA_SOURCE_LATENCY
: 0) |
380 ((flags
& PA_SINK_DYNAMIC_LATENCY
) ? PA_SOURCE_DYNAMIC_LATENCY
: 0));
382 pa_source_new_data_done(&source_data
);
384 if (!s
->monitor_source
) {
390 s
->monitor_source
->monitor_of
= s
;
392 pa_source_set_latency_range(s
->monitor_source
, s
->thread_info
.min_latency
, s
->thread_info
.max_latency
);
393 pa_source_set_fixed_latency(s
->monitor_source
, s
->thread_info
.fixed_latency
);
394 pa_source_set_max_rewind(s
->monitor_source
, s
->thread_info
.max_rewind
);
399 /* Called from main context */
/* Drive the sink's state machine to 'state': asks the implementor via
 * s->set_state(), forwards the new state to the IO thread via asyncmsgq
 * (rolling back set_state() on failure), fires change hooks/subscription
 * events, and — on suspend/resume transitions — notifies every sink input
 * and synchronizes the monitor source.
 * NOTE(review): heavily elided extract — the early-return body after the
 * state comparison, the head of the 'suspend_change =' assignment (original
 * line 413; lines 414-415 below are its continuation), several NULL-checks,
 * return statements, and closing braces are all missing here. */
400 static int sink_set_state(pa_sink
*s
, pa_sink_state_t state
) {
402 pa_bool_t suspend_change
;
403 pa_sink_state_t original_state
;
406 pa_assert_ctl_context();
/* No-op if we are already in the requested state (return elided). */
408 if (s
->state
== state
)
411 original_state
= s
->state
;
/* Continuation of: suspend_change = ... (a transition into or out of SUSPENDED). */
414 (original_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(state
)) ||
415 (PA_SINK_IS_OPENED(original_state
) && state
== PA_SINK_SUSPENDED
);
/* Let the implementor veto the transition first. */
418 if ((ret
= s
->set_state(s
, state
)) < 0)
422 if ((ret
= pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_STATE
, PA_UINT_TO_PTR(state
), 0, NULL
)) < 0) {
/* IO thread rejected the state: roll the implementor back. */
425 s
->set_state(s
, original_state
);
432 if (state
!= PA_SINK_UNLINKED
) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
433 pa_hook_fire(&s
->core
->hooks
[PA_CORE_HOOK_SINK_STATE_CHANGED
], s
);
434 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
| PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
437 if (suspend_change
) {
441 /* We're suspending or resuming, tell everyone about it */
443 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
)
444 if (s
->state
== PA_SINK_SUSPENDED
&&
445 (i
->flags
& PA_SINK_INPUT_KILL_ON_SUSPEND
))
446 pa_sink_input_kill(i
);
/* NOTE(review): the guard selecting between kill and i->suspend()
 * (original line 447) is elided here. */
448 i
->suspend(i
, state
== PA_SINK_SUSPENDED
);
450 if (s
->monitor_source
)
451 pa_source_sync_suspend(s
->monitor_source
);
/* Install the implementor's get_volume callback.
 * NOTE(review): the entire body (original lines 458-461) is elided in this
 * extract; presumably it asserts and assigns s->get_volume = cb — confirm. */
457 void pa_sink_set_get_volume_callback(pa_sink
*s
, pa_sink_cb_t cb
) {
/* Install the implementor's set_volume callback and keep the
 * PA_SINK_HW_VOLUME_CTRL flag in sync with whether a callback is present,
 * posting a change event if the flags changed after init.
 * NOTE(review): elided extract — the assignment s->set_volume = cb, the
 * flags snapshot (flags = s->flags), and the if/else heads selecting between
 * the |= and &= lines (original lines 468-470, 472-474, 477, 481-482) are
 * missing; the two flag updates below are alternatives, not sequential. */
463 void pa_sink_set_set_volume_callback(pa_sink
*s
, pa_sink_cb_t cb
) {
464 pa_sink_flags_t flags
;
/* A deferred-write callback must never outlive the set_volume callback. */
467 pa_assert(!s
->write_volume
|| cb
);
471 /* Save the current flags so we can tell if they've changed */
475 /* The sink implementor is responsible for setting decibel volume support */
476 s
->flags
|= PA_SINK_HW_VOLUME_CTRL
;
478 s
->flags
&= ~PA_SINK_HW_VOLUME_CTRL
;
479 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
480 pa_sink_enable_decibel_volume(s
, !(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
));
483 /* If the flags have changed after init, let any clients know via a change event */
484 if (s
->state
!= PA_SINK_INIT
&& flags
!= s
->flags
)
485 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
/* Install the implementor's write_volume callback and keep the
 * PA_SINK_DEFERRED_VOLUME flag in sync with whether a callback is present,
 * posting a change event if the flags changed after init.
 * NOTE(review): elided extract — the flags snapshot and the if/else heads
 * around the |= / &= pair (original lines 497-499, 501, 503) are missing;
 * the two flag updates below are alternatives, not sequential. */
488 void pa_sink_set_write_volume_callback(pa_sink
*s
, pa_sink_cb_t cb
) {
489 pa_sink_flags_t flags
;
/* Deferred volume writing only makes sense with a set_volume callback. */
492 pa_assert(!cb
|| s
->set_volume
);
494 s
->write_volume
= cb
;
496 /* Save the current flags so we can tell if they've changed */
500 s
->flags
|= PA_SINK_DEFERRED_VOLUME
;
502 s
->flags
&= ~PA_SINK_DEFERRED_VOLUME
;
504 /* If the flags have changed after init, let any clients know via a change event */
505 if (s
->state
!= PA_SINK_INIT
&& flags
!= s
->flags
)
506 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
/* Install the implementor's get_mute callback.
 * NOTE(review): the entire body (original lines 510-513) is elided in this
 * extract; presumably it asserts and assigns s->get_mute = cb — confirm. */
509 void pa_sink_set_get_mute_callback(pa_sink
*s
, pa_sink_cb_t cb
) {
/* Install the implementor's set_mute callback and keep the
 * PA_SINK_HW_MUTE_CTRL flag in sync with whether a callback is present,
 * posting a change event if the flags changed after init.
 * NOTE(review): elided extract — the assignment s->set_mute = cb, the flags
 * snapshot and the if/else heads around the |= / &= pair (original lines
 * 517-521, 523-525, 527, 529) are missing; the two flag updates below are
 * alternatives, not sequential. */
515 void pa_sink_set_set_mute_callback(pa_sink
*s
, pa_sink_cb_t cb
) {
516 pa_sink_flags_t flags
;
522 /* Save the current flags so we can tell if they've changed */
526 s
->flags
|= PA_SINK_HW_MUTE_CTRL
;
528 s
->flags
&= ~PA_SINK_HW_MUTE_CTRL
;
530 /* If the flags have changed after init, let any clients know via a change event */
531 if (s
->state
!= PA_SINK_INIT
&& flags
!= s
->flags
)
532 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
/* Enable or disable PA_SINK_FLAT_VOLUME on the sink. The request is ANDed
 * with the core-wide flat_volumes preference, and a change event is posted
 * if the flags changed after init.
 * NOTE(review): elided extract — the flags snapshot and the if/else heads
 * around the |= / &= pair (original lines 537-539, 544-546, 548, 550) are
 * missing; the two flag updates below are alternatives, not sequential. */
535 static void enable_flat_volume(pa_sink
*s
, pa_bool_t enable
) {
536 pa_sink_flags_t flags
;
540 /* Always follow the overall user preference here */
541 enable
= enable
&& s
->core
->flat_volumes
;
543 /* Save the current flags so we can tell if they've changed */
547 s
->flags
|= PA_SINK_FLAT_VOLUME
;
549 s
->flags
&= ~PA_SINK_FLAT_VOLUME
;
551 /* If the flags have changed after init, let any clients know via a change event */
552 if (s
->state
!= PA_SINK_INIT
&& flags
!= s
->flags
)
553 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
/* Enable or disable PA_SINK_DECIBEL_VOLUME, propagating the same decision to
 * flat-volume support via enable_flat_volume(), and post a change event if
 * the flags changed after init.
 * NOTE(review): elided extract — the flags snapshot and the if/else heads
 * around the two enable/disable branches (original lines 558-560, 562-564,
 * 567, 570-571) are missing; the two branches below are alternatives. */
556 void pa_sink_enable_decibel_volume(pa_sink
*s
, pa_bool_t enable
) {
557 pa_sink_flags_t flags
;
561 /* Save the current flags so we can tell if they've changed */
565 s
->flags
|= PA_SINK_DECIBEL_VOLUME
;
566 enable_flat_volume(s
, TRUE
);
568 s
->flags
&= ~PA_SINK_DECIBEL_VOLUME
;
569 enable_flat_volume(s
, FALSE
);
572 /* If the flags have changed after init, let any clients know via a change event */
573 if (s
->state
!= PA_SINK_INIT
&& flags
!= s
->flags
)
574 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
577 /* Called from main context */
/* Complete sink initialization after the implementor finished configuring it:
 * validates the flag/callback invariants established by the setter functions
 * above, finalizes decibel/flat volume support, seeds reference/real volumes
 * (copying and remapping from the master for volume-sharing filter sinks),
 * mirrors volume/mute into thread_info, transitions to SUSPENDED or IDLE,
 * puts the monitor source, and announces the new sink.
 * NOTE(review): elided extract — several comment continuations, the else
 * pairing the volume-sharing branch with line 636, the else before line 657,
 * intermediate blank/brace lines and the closing brace are missing. */
578 void pa_sink_put(pa_sink
* s
) {
579 pa_sink_assert_ref(s
);
580 pa_assert_ctl_context();
582 pa_assert(s
->state
== PA_SINK_INIT
);
583 pa_assert(!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
) || s
->input_to_master
);
585 /* The following fields must be initialized properly when calling _put() */
586 pa_assert(s
->asyncmsgq
);
587 pa_assert(s
->thread_info
.min_latency
<= s
->thread_info
.max_latency
);
589 /* Generally, flags should be initialized via pa_sink_new(). As a
590 * special exception we allow some volume related flags to be set
591 * between _new() and _put() by the callback setter functions above.
593 * Thus we implement a couple safeguards here which ensure the above
594 * setters were used (or at least the implementor made manual changes
595 * in a compatible way).
597 * Note: All of these flags set here can change over the life time
599 pa_assert(!(s
->flags
& PA_SINK_HW_VOLUME_CTRL
) || s
->set_volume
);
600 pa_assert(!(s
->flags
& PA_SINK_DEFERRED_VOLUME
) || s
->write_volume
);
601 pa_assert(!(s
->flags
& PA_SINK_HW_MUTE_CTRL
) || s
->set_mute
);
603 /* XXX: Currently decibel volume is disabled for all sinks that use volume
604 * sharing. When the master sink supports decibel volume, it would be good
605 * to have the flag also in the filter sink, but currently we don't do that
606 * so that the flags of the filter sink never change when it's moved from
607 * a master sink to another. One solution for this problem would be to
608 * remove user-visible volume altogether from filter sinks when volume
609 * sharing is used, but the current approach was easier to implement... */
610 /* We always support decibel volumes in software, otherwise we leave it to
611 * the sink implementor to set this flag as needed.
613 * Note: This flag can also change over the life time of the sink. */
614 if (!(s
->flags
& PA_SINK_HW_VOLUME_CTRL
) && !(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
))
615 pa_sink_enable_decibel_volume(s
, TRUE
);
617 /* If the sink implementor support DB volumes by itself, we should always
618 * try and enable flat volumes too */
619 if ((s
->flags
& PA_SINK_DECIBEL_VOLUME
))
620 enable_flat_volume(s
, TRUE
);
622 if (s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
) {
623 pa_sink
*root_sink
= pa_sink_get_master(s
);
625 pa_assert(root_sink
);
/* Filter sink: inherit volumes from the master, remapped to our channel map. */
627 s
->reference_volume
= root_sink
->reference_volume
;
628 pa_cvolume_remap(&s
->reference_volume
, &root_sink
->channel_map
, &s
->channel_map
);
630 s
->real_volume
= root_sink
->real_volume
;
631 pa_cvolume_remap(&s
->real_volume
, &root_sink
->channel_map
, &s
->channel_map
);
633 /* We assume that if the sink implementor changed the default
634 * volume he did so in real_volume, because that is the usual
635 * place where he is supposed to place his changes. */
636 s
->reference_volume
= s
->real_volume
;
638 s
->thread_info
.soft_volume
= s
->soft_volume
;
639 s
->thread_info
.soft_muted
= s
->muted
;
640 pa_sw_cvolume_multiply(&s
->thread_info
.current_hw_volume
, &s
->soft_volume
, &s
->real_volume
);
642 pa_assert((s
->flags
& PA_SINK_HW_VOLUME_CTRL
)
643 || (s
->base_volume
== PA_VOLUME_NORM
644 && ((s
->flags
& PA_SINK_DECIBEL_VOLUME
|| (s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)))));
645 pa_assert(!(s
->flags
& PA_SINK_DECIBEL_VOLUME
) || s
->n_volume_steps
== PA_VOLUME_NORM
+1);
646 pa_assert(!(s
->flags
& PA_SINK_DYNAMIC_LATENCY
) == (s
->thread_info
.fixed_latency
!= 0));
647 pa_assert(!(s
->flags
& PA_SINK_LATENCY
) == !(s
->monitor_source
->flags
& PA_SOURCE_LATENCY
));
648 pa_assert(!(s
->flags
& PA_SINK_DYNAMIC_LATENCY
) == !(s
->monitor_source
->flags
& PA_SOURCE_DYNAMIC_LATENCY
));
650 pa_assert(s
->monitor_source
->thread_info
.fixed_latency
== s
->thread_info
.fixed_latency
);
651 pa_assert(s
->monitor_source
->thread_info
.min_latency
== s
->thread_info
.min_latency
);
652 pa_assert(s
->monitor_source
->thread_info
.max_latency
== s
->thread_info
.max_latency
);
/* Start suspended when a suspend cause is pending, otherwise go idle. */
654 if (s
->suspend_cause
)
655 pa_assert_se(sink_set_state(s
, PA_SINK_SUSPENDED
) == 0);
657 pa_assert_se(sink_set_state(s
, PA_SINK_IDLE
) == 0);
659 pa_source_put(s
->monitor_source
);
661 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
| PA_SUBSCRIPTION_EVENT_NEW
, s
->index
);
662 pa_hook_fire(&s
->core
->hooks
[PA_CORE_HOOK_SINK_PUT
], s
);
665 /* Called from main context */
/* Disconnect the sink from the core: fires the UNLINK hook, unregisters the
 * name, removes it from the core (and card) idxsets, kills all remaining
 * inputs, transitions to UNLINKED, unlinks the monitor source, and posts the
 * REMOVE event plus the UNLINK_POST hook.
 * NOTE(review): elided extract — guards around several calls (e.g. the
 * 'linked' checks, the card NULL-check), loop braces and the closing brace
 * are missing; lines 686-688 below were presumably guarded in the original. */
666 void pa_sink_unlink(pa_sink
* s
) {
668 pa_sink_input
*i
, *j
= NULL
;
671 pa_assert_ctl_context();
673 /* Please note that pa_sink_unlink() does more than simply
674 * reversing pa_sink_put(). It also undoes the registrations
675 * already done in pa_sink_new()! */
677 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
678 * may be called multiple times on the same sink without bad
681 linked
= PA_SINK_IS_LINKED(s
->state
);
684 pa_hook_fire(&s
->core
->hooks
[PA_CORE_HOOK_SINK_UNLINK
], s
);
686 if (s
->state
!= PA_SINK_UNLINKED
)
687 pa_namereg_unregister(s
->core
, s
->name
);
688 pa_idxset_remove_by_data(s
->core
->sinks
, s
, NULL
);
691 pa_idxset_remove_by_data(s
->card
->sinks
, s
, NULL
);
/* Kill inputs until the set is drained (j presumably guards against a
 * misbehaving kill that does not remove the input — confirm). */
693 while ((i
= pa_idxset_first(s
->inputs
, NULL
))) {
695 pa_sink_input_kill(i
);
700 sink_set_state(s
, PA_SINK_UNLINKED
);
702 s
->state
= PA_SINK_UNLINKED
;
706 if (s
->monitor_source
)
707 pa_source_unlink(s
->monitor_source
);
710 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
| PA_SUBSCRIPTION_EVENT_REMOVE
, s
->index
);
711 pa_hook_fire(&s
->core
->hooks
[PA_CORE_HOOK_SINK_UNLINK_POST
], s
);
715 /* Called from main context */
/* Destructor invoked when the last reference is dropped (installed as
 * parent.parent.free in pa_sink_new()): releases the monitor source, the
 * input containers, the cached silence memblock, the proplist and the ports
 * hashmap.
 * NOTE(review): elided extract — the body of the IS_LINKED branch (likely a
 * defensive pa_sink_unlink()), frees around original lines 738-742 and
 * 744-745 (e.g. name/driver strings), and the closing brace are missing. */
716 static void sink_free(pa_object
*o
) {
717 pa_sink
*s
= PA_SINK(o
);
720 pa_assert_ctl_context();
721 pa_assert(pa_sink_refcnt(s
) == 0);
723 if (PA_SINK_IS_LINKED(s
->state
))
726 pa_log_info("Freeing sink %u \"%s\"", s
->index
, s
->name
);
728 if (s
->monitor_source
) {
729 pa_source_unref(s
->monitor_source
);
730 s
->monitor_source
= NULL
;
733 pa_idxset_free(s
->inputs
, NULL
);
734 pa_hashmap_free(s
->thread_info
.inputs
, (pa_free_cb_t
) pa_sink_input_unref
);
736 if (s
->silence
.memblock
)
737 pa_memblock_unref(s
->silence
.memblock
);
743 pa_proplist_free(s
->proplist
);
746 pa_hashmap_free(s
->ports
, (pa_free_cb_t
) pa_device_port_unref
);
751 /* Called from main context, and not while the IO thread is active, please */
/* Attach the async message queue used to talk to the IO thread, mirroring it
 * onto the monitor source.
 * NOTE(review): the assignment s->asyncmsgq = q (original lines 755-757) is
 * elided in this extract. */
752 void pa_sink_set_asyncmsgq(pa_sink
*s
, pa_asyncmsgq
*q
) {
753 pa_sink_assert_ref(s
);
754 pa_assert_ctl_context();
758 if (s
->monitor_source
)
759 pa_source_set_asyncmsgq(s
->monitor_source
, q
);
762 /* Called from main context, and not while the IO thread is active, please */
/* Update the masked subset of the sink's flags (only LATENCY and
 * DYNAMIC_LATENCY may change), log which latency flags flipped, post a change
 * event and fire the FLAGS_CHANGED hook, mirror the change onto the monitor
 * source, and recurse into any filter sinks layered on top of our inputs.
 * NOTE(review): elided extract — the early 'return' after the no-change
 * check (line 777) and the loop/function closing braces are missing. */
763 void pa_sink_update_flags(pa_sink
*s
, pa_sink_flags_t mask
, pa_sink_flags_t value
) {
764 pa_sink_flags_t old_flags
;
765 pa_sink_input
*input
;
768 pa_sink_assert_ref(s
);
769 pa_assert_ctl_context();
771 /* For now, allow only a minimal set of flags to be changed. */
772 pa_assert((mask
& ~(PA_SINK_DYNAMIC_LATENCY
|PA_SINK_LATENCY
)) == 0);
774 old_flags
= s
->flags
;
775 s
->flags
= (s
->flags
& ~mask
) | (value
& mask
);
/* Nothing changed: bail out (return elided). */
777 if (s
->flags
== old_flags
)
780 if ((s
->flags
& PA_SINK_LATENCY
) != (old_flags
& PA_SINK_LATENCY
))
781 pa_log_debug("Sink %s: LATENCY flag %s.", s
->name
, (s
->flags
& PA_SINK_LATENCY
) ? "enabled" : "disabled");
783 if ((s
->flags
& PA_SINK_DYNAMIC_LATENCY
) != (old_flags
& PA_SINK_DYNAMIC_LATENCY
))
784 pa_log_debug("Sink %s: DYNAMIC_LATENCY flag %s.",
785 s
->name
, (s
->flags
& PA_SINK_DYNAMIC_LATENCY
) ? "enabled" : "disabled");
787 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
| PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
788 pa_hook_fire(&s
->core
->hooks
[PA_CORE_HOOK_SINK_FLAGS_CHANGED
], s
);
/* Translate sink latency flags to the equivalent source flags. */
790 if (s
->monitor_source
)
791 pa_source_update_flags(s
->monitor_source
,
792 ((mask
& PA_SINK_LATENCY
) ? PA_SOURCE_LATENCY
: 0) |
793 ((mask
& PA_SINK_DYNAMIC_LATENCY
) ? PA_SOURCE_DYNAMIC_LATENCY
: 0),
794 ((value
& PA_SINK_LATENCY
) ? PA_SOURCE_LATENCY
: 0) |
795 ((value
& PA_SINK_DYNAMIC_LATENCY
) ? PA_SOURCE_DYNAMIC_LATENCY
: 0));
/* Propagate to filter sinks whose origin is routed through our inputs. */
797 PA_IDXSET_FOREACH(input
, s
->inputs
, idx
) {
798 if (input
->origin_sink
)
799 pa_sink_update_flags(input
->origin_sink
, mask
, value
);
803 /* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object the IO thread drives, mirroring it onto the
 * monitor source. */
804 void pa_sink_set_rtpoll(pa_sink
*s
, pa_rtpoll
*p
) {
805 pa_sink_assert_ref(s
);
806 pa_sink_assert_io_context(s
);
808 s
->thread_info
.rtpoll
= p
;
810 if (s
->monitor_source
)
811 pa_source_set_rtpoll(s
->monitor_source
, p
);
814 /* Called from main context */
/* Recompute RUNNING vs. IDLE from current usage, leaving a SUSPENDED sink
 * alone (the early-return body after the SUSPENDED check is elided here). */
815 int pa_sink_update_status(pa_sink
*s
) {
816 pa_sink_assert_ref(s
);
817 pa_assert_ctl_context();
818 pa_assert(PA_SINK_IS_LINKED(s
->state
));
820 if (s
->state
== PA_SINK_SUSPENDED
)
823 return sink_set_state(s
, pa_sink_used_by(s
) ? PA_SINK_RUNNING
: PA_SINK_IDLE
);
826 /* Called from any context - must be threadsafe */
/* Atomically flag (or clear) that the hardware mixer needs to be re-read once
 * it becomes accessible again; consumed in pa_sink_suspend(). */
827 void pa_sink_set_mixer_dirty(pa_sink
*s
, pa_bool_t is_dirty
)
829 pa_atomic_store(&s
->mixer_dirty
, is_dirty
? 1 : 0);
832 /* Called from main context */
/* Add or remove 'cause' from the sink's (and its monitor source's) suspend
 * cause mask, re-apply the active port to a mixer that became accessible
 * again (via message for deferred-volume sinks, directly otherwise), and
 * finally drive the state machine to SUSPENDED or RUNNING/IDLE accordingly.
 * NOTE(review): elided extract — the if/else head selecting between the |=
 * and &= pairs (original lines 838-839, 842), additional mixer-sync calls
 * around lines 856-866, the early-return body after line 868 and several
 * closing braces are missing. */
833 int pa_sink_suspend(pa_sink
*s
, pa_bool_t suspend
, pa_suspend_cause_t cause
) {
834 pa_sink_assert_ref(s
);
835 pa_assert_ctl_context();
836 pa_assert(PA_SINK_IS_LINKED(s
->state
));
837 pa_assert(cause
!= 0);
/* suspend == TRUE branch: add the cause... */
840 s
->suspend_cause
|= cause
;
841 s
->monitor_source
->suspend_cause
|= cause
;
/* ...else branch: remove it (the else itself is elided here). */
843 s
->suspend_cause
&= ~cause
;
844 s
->monitor_source
->suspend_cause
&= ~cause
;
847 if (!(s
->suspend_cause
& PA_SUSPEND_SESSION
) && (pa_atomic_load(&s
->mixer_dirty
) != 0)) {
848 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
849 it'll be handled just fine. */
850 pa_sink_set_mixer_dirty(s
, FALSE
);
851 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
852 if (s
->active_port
&& s
->set_port
) {
853 if (s
->flags
& PA_SINK_DEFERRED_VOLUME
) {
854 struct sink_message_set_port msg
= { .port
= s
->active_port
, .ret
= 0 };
855 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_PORT
, &msg
, 0, NULL
) == 0);
858 s
->set_port(s
, s
->active_port
);
/* Already in the state implied by the cause mask: nothing to do (return elided). */
868 if ((pa_sink_get_state(s
) == PA_SINK_SUSPENDED
) == !!s
->suspend_cause
)
871 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s
->name
, s
->suspend_cause
, s
->suspend_cause
? "suspending" : "resuming");
873 if (s
->suspend_cause
)
874 return sink_set_state(s
, PA_SINK_SUSPENDED
);
876 return sink_set_state(s
, pa_sink_used_by(s
) ? PA_SINK_RUNNING
: PA_SINK_IDLE
);
879 /* Called from main context */
/* Begin moving every input away from this sink: for each input, take a
 * reference and call pa_sink_input_start_move(); inputs that start moving
 * successfully are presumably pushed onto 'q' (the push and the queue
 * allocation for a NULL 'q' are elided here — original lines 888-890, 897-898),
 * failures are unreffed. Returns the queue (return elided). */
880 pa_queue
*pa_sink_move_all_start(pa_sink
*s
, pa_queue
*q
) {
881 pa_sink_input
*i
, *n
;
884 pa_sink_assert_ref(s
);
885 pa_assert_ctl_context();
886 pa_assert(PA_SINK_IS_LINKED(s
->state
));
/* Fetch the successor before touching i, since start_move removes i
 * from s->inputs. */
891 for (i
= PA_SINK_INPUT(pa_idxset_first(s
->inputs
, &idx
)); i
; i
= n
) {
892 n
= PA_SINK_INPUT(pa_idxset_next(s
->inputs
, &idx
));
894 pa_sink_input_ref(i
);
896 if (pa_sink_input_start_move(i
) >= 0)
899 pa_sink_input_unref(i
);
905 /* Called from main context */
/* Complete a bulk move: pop each input off 'q', attach it to sink 's'
 * (falling back to fail_move on error), drop the reference taken in
 * pa_sink_move_all_start(), and free the queue. */
906 void pa_sink_move_all_finish(pa_sink
*s
, pa_queue
*q
, pa_bool_t save
) {
909 pa_sink_assert_ref(s
);
910 pa_assert_ctl_context();
911 pa_assert(PA_SINK_IS_LINKED(s
->state
));
914 while ((i
= PA_SINK_INPUT(pa_queue_pop(q
)))) {
915 if (pa_sink_input_finish_move(i
, s
, save
) < 0)
916 pa_sink_input_fail_move(i
);
918 pa_sink_input_unref(i
);
921 pa_queue_free(q
, NULL
);
924 /* Called from main context */
/* Abort a bulk move: fail every queued input, drop its reference, and free
 * the queue. */
925 void pa_sink_move_all_fail(pa_queue
*q
) {
928 pa_assert_ctl_context();
931 while ((i
= PA_SINK_INPUT(pa_queue_pop(q
)))) {
932 pa_sink_input_fail_move(i
);
933 pa_sink_input_unref(i
);
936 pa_queue_free(q
, NULL
);
939 /* Called from IO thread context */
/* Scan all inputs for recorded underruns ('underrun_for_sink') at least
 * left_to_play bytes old, giving each input a chance to handle its underrun;
 * tracks the largest relevant offset in 'result' and returns the remaining
 * playable bytes (left_to_play - result).
 * NOTE(review): elided extract — the declarations of 'result' and the
 * iterator state, the bodies behind the if/else-if at original lines 954-958,
 * and the guard/branches around lines 960/963-964 are missing; the final
 * return below may be conditional in the original. */
940 size_t pa_sink_process_input_underruns(pa_sink
*s
, size_t left_to_play
) {
945 pa_sink_assert_ref(s
);
946 pa_sink_assert_io_context(s
);
948 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
) {
949 size_t uf
= i
->thread_info
.underrun_for_sink
;
952 if (uf
>= left_to_play
) {
953 if (pa_sink_input_process_underrun(i
))
956 else if (uf
> result
)
961 pa_log_debug("Found underrun %ld bytes ago (%ld bytes ahead in playback buffer)", (long) result
, (long) left_to_play
- result
);
962 return left_to_play
- result
;
965 /* Called from IO thread context */
/* Execute a rewind of 'nbytes' in the IO thread: clears the pending rewind
 * request, rewinds any deferred volume changes, forwards the rewind to every
 * input, and finally to the monitor source if it is linked.
 * NOTE(review): elided extract — the early-return body after the short-cut
 * check, the implementor notification around original lines 983-989/993-995,
 * and closing braces are missing. */
966 void pa_sink_process_rewind(pa_sink
*s
, size_t nbytes
) {
970 pa_sink_assert_ref(s
);
971 pa_sink_assert_io_context(s
);
972 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
974 /* If nobody requested this and this is actually no real rewind
975 * then we can short cut this. Please note that this means that
976 * not all rewind requests triggered upstream will always be
977 * translated in actual requests! */
978 if (!s
->thread_info
.rewind_requested
&& nbytes
<= 0)
981 s
->thread_info
.rewind_nbytes
= 0;
982 s
->thread_info
.rewind_requested
= FALSE
;
985 pa_log_debug("Processing rewind...");
/* Deferred-volume sinks must rewind their pending volume timeline too. */
986 if (s
->flags
& PA_SINK_DEFERRED_VOLUME
)
987 pa_sink_volume_change_rewind(s
, nbytes
);
990 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
) {
991 pa_sink_input_assert_ref(i
);
992 pa_sink_input_process_rewind(i
, nbytes
);
996 if (s
->monitor_source
&& PA_SOURCE_IS_LINKED(s
->monitor_source
->thread_info
.state
))
997 pa_source_process_rewind(s
->monitor_source
, nbytes
);
1001 /* Called from IO thread context */
/* Collect up to 'maxinfo' pa_mix_info entries by peeking each input's next
 * chunk, shrinking *length to the smallest chunk seen (mixlength); silent
 * chunks are unreffed (and presumably skipped — the 'continue' is elided),
 * non-silent chunks keep a reference on their input in info->userdata.
 * NOTE(review): elided extract — declarations of 'i'/'state'/'n', the
 * info++/maxinfo--/n++ bookkeeping (original lines 1029-1034) and the
 * 'return n' are missing. */
1002 static unsigned fill_mix_info(pa_sink
*s
, size_t *length
, pa_mix_info
*info
, unsigned maxinfo
) {
1006 size_t mixlength
= *length
;
1008 pa_sink_assert_ref(s
);
1009 pa_sink_assert_io_context(s
);
1012 while ((i
= pa_hashmap_iterate(s
->thread_info
.inputs
, &state
, NULL
)) && maxinfo
> 0) {
1013 pa_sink_input_assert_ref(i
);
1015 pa_sink_input_peek(i
, *length
, &info
->chunk
, &info
->volume
);
/* Track the shortest chunk: the mix can only cover what every input has. */
1017 if (mixlength
== 0 || info
->chunk
.length
< mixlength
)
1018 mixlength
= info
->chunk
.length
;
1020 if (pa_memblock_is_silence(info
->chunk
.memblock
)) {
1021 pa_memblock_unref(info
->chunk
.memblock
);
/* Keep the input alive while its chunk sits in the mix array. */
1025 info
->userdata
= pa_sink_input_ref(i
);
1027 pa_assert(info
->chunk
.memblock
);
1028 pa_assert(info
->chunk
.length
> 0);
1036 *length
= mixlength
;
1041 /* Called from IO thread context */
1042 static void inputs_drop(pa_sink
*s
, pa_mix_info
*info
, unsigned n
, pa_memchunk
*result
) {
1046 unsigned n_unreffed
= 0;
1048 pa_sink_assert_ref(s
);
1049 pa_sink_assert_io_context(s
);
1051 pa_assert(result
->memblock
);
1052 pa_assert(result
->length
> 0);
1054 /* We optimize for the case where the order of the inputs has not changed */
1056 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
) {
1058 pa_mix_info
* m
= NULL
;
1060 pa_sink_input_assert_ref(i
);
1062 /* Let's try to find the matching entry info the pa_mix_info array */
1063 for (j
= 0; j
< n
; j
++) {
1065 if (info
[p
].userdata
== i
) {
1075 /* Drop read data */
1076 pa_sink_input_drop(i
, result
->length
);
1078 if (s
->monitor_source
&& PA_SOURCE_IS_LINKED(s
->monitor_source
->thread_info
.state
)) {
1080 if (pa_hashmap_size(i
->thread_info
.direct_outputs
) > 0) {
1081 void *ostate
= NULL
;
1082 pa_source_output
*o
;
1085 if (m
&& m
->chunk
.memblock
) {
1087 pa_memblock_ref(c
.memblock
);
1088 pa_assert(result
->length
<= c
.length
);
1089 c
.length
= result
->length
;
1091 pa_memchunk_make_writable(&c
, 0);
1092 pa_volume_memchunk(&c
, &s
->sample_spec
, &m
->volume
);
1095 pa_memblock_ref(c
.memblock
);
1096 pa_assert(result
->length
<= c
.length
);
1097 c
.length
= result
->length
;
1100 while ((o
= pa_hashmap_iterate(i
->thread_info
.direct_outputs
, &ostate
, NULL
))) {
1101 pa_source_output_assert_ref(o
);
1102 pa_assert(o
->direct_on_input
== i
);
1103 pa_source_post_direct(s
->monitor_source
, o
, &c
);
1106 pa_memblock_unref(c
.memblock
);
1111 if (m
->chunk
.memblock
)
1112 pa_memblock_unref(m
->chunk
.memblock
);
1113 pa_memchunk_reset(&m
->chunk
);
1115 pa_sink_input_unref(m
->userdata
);
1122 /* Now drop references to entries that are included in the
1123 * pa_mix_info array but don't exist anymore */
1125 if (n_unreffed
< n
) {
1126 for (; n
> 0; info
++, n
--) {
1128 pa_sink_input_unref(info
->userdata
);
1129 if (info
->chunk
.memblock
)
1130 pa_memblock_unref(info
->chunk
.memblock
);
1134 if (s
->monitor_source
&& PA_SOURCE_IS_LINKED(s
->monitor_source
->thread_info
.state
))
1135 pa_source_post(s
->monitor_source
, result
);
1138 /* Called from IO thread context */
1139 void pa_sink_render(pa_sink
*s
, size_t length
, pa_memchunk
*result
) {
1140 pa_mix_info info
[MAX_MIX_CHANNELS
];
1142 size_t block_size_max
;
1144 pa_sink_assert_ref(s
);
1145 pa_sink_assert_io_context(s
);
1146 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
1147 pa_assert(pa_frame_aligned(length
, &s
->sample_spec
));
1150 pa_assert(!s
->thread_info
.rewind_requested
);
1151 pa_assert(s
->thread_info
.rewind_nbytes
== 0);
1153 if (s
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1154 result
->memblock
= pa_memblock_ref(s
->silence
.memblock
);
1155 result
->index
= s
->silence
.index
;
1156 result
->length
= PA_MIN(s
->silence
.length
, length
);
1163 length
= pa_frame_align(MIX_BUFFER_LENGTH
, &s
->sample_spec
);
1165 block_size_max
= pa_mempool_block_size_max(s
->core
->mempool
);
1166 if (length
> block_size_max
)
1167 length
= pa_frame_align(block_size_max
, &s
->sample_spec
);
1169 pa_assert(length
> 0);
1171 n
= fill_mix_info(s
, &length
, info
, MAX_MIX_CHANNELS
);
1175 *result
= s
->silence
;
1176 pa_memblock_ref(result
->memblock
);
1178 if (result
->length
> length
)
1179 result
->length
= length
;
1181 } else if (n
== 1) {
1184 *result
= info
[0].chunk
;
1185 pa_memblock_ref(result
->memblock
);
1187 if (result
->length
> length
)
1188 result
->length
= length
;
1190 pa_sw_cvolume_multiply(&volume
, &s
->thread_info
.soft_volume
, &info
[0].volume
);
1192 if (s
->thread_info
.soft_muted
|| pa_cvolume_is_muted(&volume
)) {
1193 pa_memblock_unref(result
->memblock
);
1194 pa_silence_memchunk_get(&s
->core
->silence_cache
,
1199 } else if (!pa_cvolume_is_norm(&volume
)) {
1200 pa_memchunk_make_writable(result
, 0);
1201 pa_volume_memchunk(result
, &s
->sample_spec
, &volume
);
1205 result
->memblock
= pa_memblock_new(s
->core
->mempool
, length
);
1207 ptr
= pa_memblock_acquire(result
->memblock
);
1208 result
->length
= pa_mix(info
, n
,
1211 &s
->thread_info
.soft_volume
,
1212 s
->thread_info
.soft_muted
);
1213 pa_memblock_release(result
->memblock
);
1218 inputs_drop(s
, info
, n
, result
);
1223 /* Called from IO thread context */
1224 void pa_sink_render_into(pa_sink
*s
, pa_memchunk
*target
) {
1225 pa_mix_info info
[MAX_MIX_CHANNELS
];
1227 size_t length
, block_size_max
;
1229 pa_sink_assert_ref(s
);
1230 pa_sink_assert_io_context(s
);
1231 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
1233 pa_assert(target
->memblock
);
1234 pa_assert(target
->length
> 0);
1235 pa_assert(pa_frame_aligned(target
->length
, &s
->sample_spec
));
1237 pa_assert(!s
->thread_info
.rewind_requested
);
1238 pa_assert(s
->thread_info
.rewind_nbytes
== 0);
1240 if (s
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1241 pa_silence_memchunk(target
, &s
->sample_spec
);
1247 length
= target
->length
;
1248 block_size_max
= pa_mempool_block_size_max(s
->core
->mempool
);
1249 if (length
> block_size_max
)
1250 length
= pa_frame_align(block_size_max
, &s
->sample_spec
);
1252 pa_assert(length
> 0);
1254 n
= fill_mix_info(s
, &length
, info
, MAX_MIX_CHANNELS
);
1257 if (target
->length
> length
)
1258 target
->length
= length
;
1260 pa_silence_memchunk(target
, &s
->sample_spec
);
1261 } else if (n
== 1) {
1264 if (target
->length
> length
)
1265 target
->length
= length
;
1267 pa_sw_cvolume_multiply(&volume
, &s
->thread_info
.soft_volume
, &info
[0].volume
);
1269 if (s
->thread_info
.soft_muted
|| pa_cvolume_is_muted(&volume
))
1270 pa_silence_memchunk(target
, &s
->sample_spec
);
1274 vchunk
= info
[0].chunk
;
1275 pa_memblock_ref(vchunk
.memblock
);
1277 if (vchunk
.length
> length
)
1278 vchunk
.length
= length
;
1280 if (!pa_cvolume_is_norm(&volume
)) {
1281 pa_memchunk_make_writable(&vchunk
, 0);
1282 pa_volume_memchunk(&vchunk
, &s
->sample_spec
, &volume
);
1285 pa_memchunk_memcpy(target
, &vchunk
);
1286 pa_memblock_unref(vchunk
.memblock
);
1292 ptr
= pa_memblock_acquire(target
->memblock
);
1294 target
->length
= pa_mix(info
, n
,
1295 (uint8_t*) ptr
+ target
->index
, length
,
1297 &s
->thread_info
.soft_volume
,
1298 s
->thread_info
.soft_muted
);
1300 pa_memblock_release(target
->memblock
);
1303 inputs_drop(s
, info
, n
, target
);
1308 /* Called from IO thread context */
1309 void pa_sink_render_into_full(pa_sink
*s
, pa_memchunk
*target
) {
1313 pa_sink_assert_ref(s
);
1314 pa_sink_assert_io_context(s
);
1315 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
1317 pa_assert(target
->memblock
);
1318 pa_assert(target
->length
> 0);
1319 pa_assert(pa_frame_aligned(target
->length
, &s
->sample_spec
));
1321 pa_assert(!s
->thread_info
.rewind_requested
);
1322 pa_assert(s
->thread_info
.rewind_nbytes
== 0);
1324 if (s
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1325 pa_silence_memchunk(target
, &s
->sample_spec
);
1338 pa_sink_render_into(s
, &chunk
);
1347 /* Called from IO thread context */
1348 void pa_sink_render_full(pa_sink
*s
, size_t length
, pa_memchunk
*result
) {
1349 pa_sink_assert_ref(s
);
1350 pa_sink_assert_io_context(s
);
1351 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
1352 pa_assert(length
> 0);
1353 pa_assert(pa_frame_aligned(length
, &s
->sample_spec
));
1356 pa_assert(!s
->thread_info
.rewind_requested
);
1357 pa_assert(s
->thread_info
.rewind_nbytes
== 0);
1361 pa_sink_render(s
, length
, result
);
1363 if (result
->length
< length
) {
1366 pa_memchunk_make_writable(result
, length
);
1368 chunk
.memblock
= result
->memblock
;
1369 chunk
.index
= result
->index
+ result
->length
;
1370 chunk
.length
= length
- result
->length
;
1372 pa_sink_render_into_full(s
, &chunk
);
1374 result
->length
= length
;
1380 /* Called from main thread */
1381 pa_bool_t
pa_sink_update_rate(pa_sink
*s
, uint32_t rate
, pa_bool_t passthrough
)
1383 if (s
->update_rate
) {
1384 uint32_t desired_rate
= rate
;
1385 uint32_t default_rate
= s
->default_sample_rate
;
1386 uint32_t alternate_rate
= s
->alternate_sample_rate
;
1389 pa_bool_t use_alternate
= FALSE
;
1391 if (PA_UNLIKELY(default_rate
== alternate_rate
)) {
1392 pa_log_warn("Default and alternate sample rates are the same.");
1396 if (PA_SINK_IS_RUNNING(s
->state
)) {
1397 pa_log_info("Cannot update rate, SINK_IS_RUNNING, will keep using %u Hz",
1398 s
->sample_spec
.rate
);
1402 if (s
->monitor_source
) {
1403 if (PA_SOURCE_IS_RUNNING(s
->monitor_source
->state
) == TRUE
) {
1404 pa_log_info("Cannot update rate, monitor source is RUNNING");
1409 if (PA_UNLIKELY (desired_rate
< 8000 ||
1410 desired_rate
> PA_RATE_MAX
))
1414 pa_assert(default_rate
% 4000 || default_rate
% 11025);
1415 pa_assert(alternate_rate
% 4000 || alternate_rate
% 11025);
1417 if (default_rate
% 4000) {
1418 /* default is a 11025 multiple */
1419 if ((alternate_rate
% 4000 == 0) && (desired_rate
% 4000 == 0))
1422 /* default is 4000 multiple */
1423 if ((alternate_rate
% 11025 == 0) && (desired_rate
% 11025 == 0))
1428 desired_rate
= alternate_rate
;
1430 desired_rate
= default_rate
;
1432 desired_rate
= rate
; /* use stream sampling rate, discard default/alternate settings */
1435 if (desired_rate
== s
->sample_spec
.rate
)
1438 if (!passthrough
&& pa_sink_used_by(s
) > 0)
1441 pa_log_debug("Suspending sink %s due to changing the sample rate.", s
->name
);
1442 pa_sink_suspend(s
, TRUE
, PA_SUSPEND_IDLE
); /* needed before rate update, will be resumed automatically */
1444 if (s
->update_rate(s
, desired_rate
) == TRUE
) {
1445 /* update monitor source as well */
1446 if (s
->monitor_source
&& !passthrough
)
1447 pa_source_update_rate(s
->monitor_source
, desired_rate
, FALSE
);
1448 pa_log_info("Changed sampling rate successfully");
1450 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1451 if (i
->state
== PA_SINK_INPUT_CORKED
)
1452 pa_sink_input_update_rate(i
);
1461 /* Called from main thread */
1462 pa_usec_t
pa_sink_get_latency(pa_sink
*s
) {
1465 pa_sink_assert_ref(s
);
1466 pa_assert_ctl_context();
1467 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1469 /* The returned value is supposed to be in the time domain of the sound card! */
1471 if (s
->state
== PA_SINK_SUSPENDED
)
1474 if (!(s
->flags
& PA_SINK_LATENCY
))
1477 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_LATENCY
, &usec
, 0, NULL
) == 0);
1479 /* usec is unsigned, so check that the offset can be added to usec without
1481 if (-s
->latency_offset
<= (int64_t) usec
)
1482 usec
+= s
->latency_offset
;
1489 /* Called from IO thread */
1490 pa_usec_t
pa_sink_get_latency_within_thread(pa_sink
*s
) {
1494 pa_sink_assert_ref(s
);
1495 pa_sink_assert_io_context(s
);
1496 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
1498 /* The returned value is supposed to be in the time domain of the sound card! */
1500 if (s
->thread_info
.state
== PA_SINK_SUSPENDED
)
1503 if (!(s
->flags
& PA_SINK_LATENCY
))
1506 o
= PA_MSGOBJECT(s
);
1508 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1510 if (o
->process_msg(o
, PA_SINK_MESSAGE_GET_LATENCY
, &usec
, 0, NULL
) < 0)
1513 /* usec is unsigned, so check that the offset can be added to usec without
1515 if (-s
->thread_info
.latency_offset
<= (int64_t) usec
)
1516 usec
+= s
->thread_info
.latency_offset
;
1523 /* Called from the main thread (and also from the IO thread while the main
1524 * thread is waiting).
1526 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1527 * set. Instead, flat volume mode is detected by checking whether the root sink
1528 * has the flag set. */
1529 pa_bool_t
pa_sink_flat_volume_enabled(pa_sink
*s
) {
1530 pa_sink_assert_ref(s
);
1532 s
= pa_sink_get_master(s
);
1535 return (s
->flags
& PA_SINK_FLAT_VOLUME
);
1540 /* Called from the main thread (and also from the IO thread while the main
1541 * thread is waiting). */
1542 pa_sink
*pa_sink_get_master(pa_sink
*s
) {
1543 pa_sink_assert_ref(s
);
1545 while (s
&& (s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)) {
1546 if (PA_UNLIKELY(!s
->input_to_master
))
1549 s
= s
->input_to_master
->sink
;
1555 /* Called from main context */
1556 pa_bool_t
pa_sink_is_passthrough(pa_sink
*s
) {
1557 pa_sink_input
*alt_i
;
1560 pa_sink_assert_ref(s
);
1562 /* one and only one PASSTHROUGH input can possibly be connected */
1563 if (pa_idxset_size(s
->inputs
) == 1) {
1564 alt_i
= pa_idxset_first(s
->inputs
, &idx
);
1566 if (pa_sink_input_is_passthrough(alt_i
))
1573 /* Called from main context */
1574 void pa_sink_enter_passthrough(pa_sink
*s
) {
1577 /* disable the monitor in passthrough mode */
1578 if (s
->monitor_source
) {
1579 pa_log_debug("Suspending monitor source %s, because the sink is entering the passthrough mode.", s
->monitor_source
->name
);
1580 pa_source_suspend(s
->monitor_source
, TRUE
, PA_SUSPEND_PASSTHROUGH
);
1583 /* set the volume to NORM */
1584 s
->saved_volume
= *pa_sink_get_volume(s
, TRUE
);
1585 s
->saved_save_volume
= s
->save_volume
;
1587 pa_cvolume_set(&volume
, s
->sample_spec
.channels
, PA_MIN(s
->base_volume
, PA_VOLUME_NORM
));
1588 pa_sink_set_volume(s
, &volume
, TRUE
, FALSE
);
1591 /* Called from main context */
1592 void pa_sink_leave_passthrough(pa_sink
*s
) {
1593 /* Unsuspend monitor */
1594 if (s
->monitor_source
) {
1595 pa_log_debug("Resuming monitor source %s, because the sink is leaving the passthrough mode.", s
->monitor_source
->name
);
1596 pa_source_suspend(s
->monitor_source
, FALSE
, PA_SUSPEND_PASSTHROUGH
);
1599 /* Restore sink volume to what it was before we entered passthrough mode */
1600 pa_sink_set_volume(s
, &s
->saved_volume
, TRUE
, s
->saved_save_volume
);
1602 pa_cvolume_init(&s
->saved_volume
);
1603 s
->saved_save_volume
= FALSE
;
1606 /* Called from main context. */
1607 static void compute_reference_ratio(pa_sink_input
*i
) {
1609 pa_cvolume remapped
;
1612 pa_assert(pa_sink_flat_volume_enabled(i
->sink
));
1615 * Calculates the reference ratio from the sink's reference
1616 * volume. This basically calculates:
1618 * i->reference_ratio = i->volume / i->sink->reference_volume
1621 remapped
= i
->sink
->reference_volume
;
1622 pa_cvolume_remap(&remapped
, &i
->sink
->channel_map
, &i
->channel_map
);
1624 i
->reference_ratio
.channels
= i
->sample_spec
.channels
;
1626 for (c
= 0; c
< i
->sample_spec
.channels
; c
++) {
1628 /* We don't update when the sink volume is 0 anyway */
1629 if (remapped
.values
[c
] <= PA_VOLUME_MUTED
)
1632 /* Don't update the reference ratio unless necessary */
1633 if (pa_sw_volume_multiply(
1634 i
->reference_ratio
.values
[c
],
1635 remapped
.values
[c
]) == i
->volume
.values
[c
])
1638 i
->reference_ratio
.values
[c
] = pa_sw_volume_divide(
1639 i
->volume
.values
[c
],
1640 remapped
.values
[c
]);
1644 /* Called from main context. Only called for the root sink in volume sharing
1645 * cases, except for internal recursive calls. */
1646 static void compute_reference_ratios(pa_sink
*s
) {
1650 pa_sink_assert_ref(s
);
1651 pa_assert_ctl_context();
1652 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1653 pa_assert(pa_sink_flat_volume_enabled(s
));
1655 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1656 compute_reference_ratio(i
);
1658 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
))
1659 compute_reference_ratios(i
->origin_sink
);
1663 /* Called from main context. Only called for the root sink in volume sharing
1664 * cases, except for internal recursive calls. */
1665 static void compute_real_ratios(pa_sink
*s
) {
1669 pa_sink_assert_ref(s
);
1670 pa_assert_ctl_context();
1671 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1672 pa_assert(pa_sink_flat_volume_enabled(s
));
1674 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1676 pa_cvolume remapped
;
1678 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)) {
1679 /* The origin sink uses volume sharing, so this input's real ratio
1680 * is handled as a special case - the real ratio must be 0 dB, and
1681 * as a result i->soft_volume must equal i->volume_factor. */
1682 pa_cvolume_reset(&i
->real_ratio
, i
->real_ratio
.channels
);
1683 i
->soft_volume
= i
->volume_factor
;
1685 compute_real_ratios(i
->origin_sink
);
1691 * This basically calculates:
1693 * i->real_ratio := i->volume / s->real_volume
1694 * i->soft_volume := i->real_ratio * i->volume_factor
1697 remapped
= s
->real_volume
;
1698 pa_cvolume_remap(&remapped
, &s
->channel_map
, &i
->channel_map
);
1700 i
->real_ratio
.channels
= i
->sample_spec
.channels
;
1701 i
->soft_volume
.channels
= i
->sample_spec
.channels
;
1703 for (c
= 0; c
< i
->sample_spec
.channels
; c
++) {
1705 if (remapped
.values
[c
] <= PA_VOLUME_MUTED
) {
1706 /* We leave i->real_ratio untouched */
1707 i
->soft_volume
.values
[c
] = PA_VOLUME_MUTED
;
1711 /* Don't lose accuracy unless necessary */
1712 if (pa_sw_volume_multiply(
1713 i
->real_ratio
.values
[c
],
1714 remapped
.values
[c
]) != i
->volume
.values
[c
])
1716 i
->real_ratio
.values
[c
] = pa_sw_volume_divide(
1717 i
->volume
.values
[c
],
1718 remapped
.values
[c
]);
1720 i
->soft_volume
.values
[c
] = pa_sw_volume_multiply(
1721 i
->real_ratio
.values
[c
],
1722 i
->volume_factor
.values
[c
]);
1725 /* We don't copy the soft_volume to the thread_info data
1726 * here. That must be done by the caller */
1730 static pa_cvolume
*cvolume_remap_minimal_impact(
1732 const pa_cvolume
*template,
1733 const pa_channel_map
*from
,
1734 const pa_channel_map
*to
) {
1739 pa_assert(template);
1742 pa_assert(pa_cvolume_compatible_with_channel_map(v
, from
));
1743 pa_assert(pa_cvolume_compatible_with_channel_map(template, to
));
1745 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1746 * mapping from sink input to sink volumes:
1748 * If template is a possible remapping from v it is used instead
1749 * of remapping anew.
1751 * If the channel maps don't match we set an all-channel volume on
1752 * the sink to ensure that changing a volume on one stream has no
1753 * effect that cannot be compensated for in another stream that
1754 * does not have the same channel map as the sink. */
1756 if (pa_channel_map_equal(from
, to
))
1760 if (pa_cvolume_equal(pa_cvolume_remap(&t
, to
, from
), v
)) {
1765 pa_cvolume_set(v
, to
->channels
, pa_cvolume_max(v
));
1769 /* Called from main thread. Only called for the root sink in volume sharing
1770 * cases, except for internal recursive calls. */
1771 static void get_maximum_input_volume(pa_sink
*s
, pa_cvolume
*max_volume
, const pa_channel_map
*channel_map
) {
1775 pa_sink_assert_ref(s
);
1776 pa_assert(max_volume
);
1777 pa_assert(channel_map
);
1778 pa_assert(pa_sink_flat_volume_enabled(s
));
1780 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1781 pa_cvolume remapped
;
1783 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)) {
1784 get_maximum_input_volume(i
->origin_sink
, max_volume
, channel_map
);
1786 /* Ignore this input. The origin sink uses volume sharing, so this
1787 * input's volume will be set to be equal to the root sink's real
1788 * volume. Obviously this input's current volume must not then
1789 * affect what the root sink's real volume will be. */
1793 remapped
= i
->volume
;
1794 cvolume_remap_minimal_impact(&remapped
, max_volume
, &i
->channel_map
, channel_map
);
1795 pa_cvolume_merge(max_volume
, max_volume
, &remapped
);
1799 /* Called from main thread. Only called for the root sink in volume sharing
1800 * cases, except for internal recursive calls. */
1801 static pa_bool_t
has_inputs(pa_sink
*s
) {
1805 pa_sink_assert_ref(s
);
1807 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1808 if (!i
->origin_sink
|| !(i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
) || has_inputs(i
->origin_sink
))
1815 /* Called from main thread. Only called for the root sink in volume sharing
1816 * cases, except for internal recursive calls. */
1817 static void update_real_volume(pa_sink
*s
, const pa_cvolume
*new_volume
, pa_channel_map
*channel_map
) {
1821 pa_sink_assert_ref(s
);
1822 pa_assert(new_volume
);
1823 pa_assert(channel_map
);
1825 s
->real_volume
= *new_volume
;
1826 pa_cvolume_remap(&s
->real_volume
, channel_map
, &s
->channel_map
);
1828 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1829 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)) {
1830 if (pa_sink_flat_volume_enabled(s
)) {
1831 pa_cvolume old_volume
= i
->volume
;
1833 /* Follow the root sink's real volume. */
1834 i
->volume
= *new_volume
;
1835 pa_cvolume_remap(&i
->volume
, channel_map
, &i
->channel_map
);
1836 compute_reference_ratio(i
);
1838 /* The volume changed, let's tell people so */
1839 if (!pa_cvolume_equal(&old_volume
, &i
->volume
)) {
1840 if (i
->volume_changed
)
1841 i
->volume_changed(i
);
1843 pa_subscription_post(i
->core
, PA_SUBSCRIPTION_EVENT_SINK_INPUT
|PA_SUBSCRIPTION_EVENT_CHANGE
, i
->index
);
1847 update_real_volume(i
->origin_sink
, new_volume
, channel_map
);
1852 /* Called from main thread. Only called for the root sink in shared volume
1854 static void compute_real_volume(pa_sink
*s
) {
1855 pa_sink_assert_ref(s
);
1856 pa_assert_ctl_context();
1857 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1858 pa_assert(pa_sink_flat_volume_enabled(s
));
1859 pa_assert(!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
));
1861 /* This determines the maximum volume of all streams and sets
1862 * s->real_volume accordingly. */
1864 if (!has_inputs(s
)) {
1865 /* In the special case that we have no sink inputs we leave the
1866 * volume unmodified. */
1867 update_real_volume(s
, &s
->reference_volume
, &s
->channel_map
);
1871 pa_cvolume_mute(&s
->real_volume
, s
->channel_map
.channels
);
1873 /* First let's determine the new maximum volume of all inputs
1874 * connected to this sink */
1875 get_maximum_input_volume(s
, &s
->real_volume
, &s
->channel_map
);
1876 update_real_volume(s
, &s
->real_volume
, &s
->channel_map
);
1878 /* Then, let's update the real ratios/soft volumes of all inputs
1879 * connected to this sink */
1880 compute_real_ratios(s
);
1883 /* Called from main thread. Only called for the root sink in shared volume
1884 * cases, except for internal recursive calls. */
1885 static void propagate_reference_volume(pa_sink
*s
) {
1889 pa_sink_assert_ref(s
);
1890 pa_assert_ctl_context();
1891 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1892 pa_assert(pa_sink_flat_volume_enabled(s
));
1894 /* This is called whenever the sink volume changes that is not
1895 * caused by a sink input volume change. We need to fix up the
1896 * sink input volumes accordingly */
1898 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1899 pa_cvolume old_volume
;
1901 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)) {
1902 propagate_reference_volume(i
->origin_sink
);
1904 /* Since the origin sink uses volume sharing, this input's volume
1905 * needs to be updated to match the root sink's real volume, but
1906 * that will be done later in update_shared_real_volume(). */
1910 old_volume
= i
->volume
;
1912 /* This basically calculates:
1914 * i->volume := s->reference_volume * i->reference_ratio */
1916 i
->volume
= s
->reference_volume
;
1917 pa_cvolume_remap(&i
->volume
, &s
->channel_map
, &i
->channel_map
);
1918 pa_sw_cvolume_multiply(&i
->volume
, &i
->volume
, &i
->reference_ratio
);
1920 /* The volume changed, let's tell people so */
1921 if (!pa_cvolume_equal(&old_volume
, &i
->volume
)) {
1923 if (i
->volume_changed
)
1924 i
->volume_changed(i
);
1926 pa_subscription_post(i
->core
, PA_SUBSCRIPTION_EVENT_SINK_INPUT
|PA_SUBSCRIPTION_EVENT_CHANGE
, i
->index
);
1931 /* Called from main thread. Only called for the root sink in volume sharing
1932 * cases, except for internal recursive calls. The return value indicates
1933 * whether any reference volume actually changed. */
1934 static pa_bool_t
update_reference_volume(pa_sink
*s
, const pa_cvolume
*v
, const pa_channel_map
*channel_map
, pa_bool_t save
) {
1936 pa_bool_t reference_volume_changed
;
1940 pa_sink_assert_ref(s
);
1941 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1943 pa_assert(channel_map
);
1944 pa_assert(pa_cvolume_valid(v
));
1947 pa_cvolume_remap(&volume
, channel_map
, &s
->channel_map
);
1949 reference_volume_changed
= !pa_cvolume_equal(&volume
, &s
->reference_volume
);
1950 s
->reference_volume
= volume
;
1952 s
->save_volume
= (!reference_volume_changed
&& s
->save_volume
) || save
;
1954 if (reference_volume_changed
)
1955 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
1956 else if (!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
))
1957 /* If the root sink's volume doesn't change, then there can't be any
1958 * changes in the other sinks in the sink tree either.
1960 * It's probably theoretically possible that even if the root sink's
1961 * volume changes slightly, some filter sink doesn't change its volume
1962 * due to rounding errors. If that happens, we still want to propagate
1963 * the changed root sink volume to the sinks connected to the
1964 * intermediate sink that didn't change its volume. This theoretical
1965 * possibility is the reason why we have that !(s->flags &
1966 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
1967 * notice even if we returned here FALSE always if
1968 * reference_volume_changed is FALSE. */
1971 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1972 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
))
1973 update_reference_volume(i
->origin_sink
, v
, channel_map
, FALSE
);
1979 /* Called from main thread */
1980 void pa_sink_set_volume(
1982 const pa_cvolume
*volume
,
1986 pa_cvolume new_reference_volume
;
1989 pa_sink_assert_ref(s
);
1990 pa_assert_ctl_context();
1991 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1992 pa_assert(!volume
|| pa_cvolume_valid(volume
));
1993 pa_assert(volume
|| pa_sink_flat_volume_enabled(s
));
1994 pa_assert(!volume
|| volume
->channels
== 1 || pa_cvolume_compatible(volume
, &s
->sample_spec
));
1996 /* make sure we don't change the volume when a PASSTHROUGH input is connected ...
1997 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
1998 if (pa_sink_is_passthrough(s
) && (!volume
|| !pa_cvolume_is_norm(volume
))) {
1999 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
2003 /* In case of volume sharing, the volume is set for the root sink first,
2004 * from which it's then propagated to the sharing sinks. */
2005 root_sink
= pa_sink_get_master(s
);
2007 if (PA_UNLIKELY(!root_sink
))
2010 /* As a special exception we accept mono volumes on all sinks --
2011 * even on those with more complex channel maps */
2014 if (pa_cvolume_compatible(volume
, &s
->sample_spec
))
2015 new_reference_volume
= *volume
;
2017 new_reference_volume
= s
->reference_volume
;
2018 pa_cvolume_scale(&new_reference_volume
, pa_cvolume_max(volume
));
2021 pa_cvolume_remap(&new_reference_volume
, &s
->channel_map
, &root_sink
->channel_map
);
2023 if (update_reference_volume(root_sink
, &new_reference_volume
, &root_sink
->channel_map
, save
)) {
2024 if (pa_sink_flat_volume_enabled(root_sink
)) {
2025 /* OK, propagate this volume change back to the inputs */
2026 propagate_reference_volume(root_sink
);
2028 /* And now recalculate the real volume */
2029 compute_real_volume(root_sink
);
2031 update_real_volume(root_sink
, &root_sink
->reference_volume
, &root_sink
->channel_map
);
2035 /* If volume is NULL we synchronize the sink's real and
2036 * reference volumes with the stream volumes. */
2038 pa_assert(pa_sink_flat_volume_enabled(root_sink
));
2040 /* Ok, let's determine the new real volume */
2041 compute_real_volume(root_sink
);
2043 /* Let's 'push' the reference volume if necessary */
2044 pa_cvolume_merge(&new_reference_volume
, &s
->reference_volume
, &root_sink
->real_volume
);
2045 /* If the sink and it's root don't have the same number of channels, we need to remap */
2046 if (s
!= root_sink
&& !pa_channel_map_equal(&s
->channel_map
, &root_sink
->channel_map
))
2047 pa_cvolume_remap(&new_reference_volume
, &s
->channel_map
, &root_sink
->channel_map
);
2048 update_reference_volume(root_sink
, &new_reference_volume
, &root_sink
->channel_map
, save
);
2050 /* Now that the reference volume is updated, we can update the streams'
2051 * reference ratios. */
2052 compute_reference_ratios(root_sink
);
2055 if (root_sink
->set_volume
) {
2056 /* If we have a function set_volume(), then we do not apply a
2057 * soft volume by default. However, set_volume() is free to
2058 * apply one to root_sink->soft_volume */
2060 pa_cvolume_reset(&root_sink
->soft_volume
, root_sink
->sample_spec
.channels
);
2061 if (!(root_sink
->flags
& PA_SINK_DEFERRED_VOLUME
))
2062 root_sink
->set_volume(root_sink
);
2065 /* If we have no function set_volume(), then the soft volume
2066 * becomes the real volume */
2067 root_sink
->soft_volume
= root_sink
->real_volume
;
2069 /* This tells the sink that soft volume and/or real volume changed */
2071 pa_assert_se(pa_asyncmsgq_send(root_sink
->asyncmsgq
, PA_MSGOBJECT(root_sink
), PA_SINK_MESSAGE_SET_SHARED_VOLUME
, NULL
, 0, NULL
) == 0);
2074 /* Called from the io thread if sync volume is used, otherwise from the main thread.
2075 * Only to be called by sink implementor */
2076 void pa_sink_set_soft_volume(pa_sink
*s
, const pa_cvolume
*volume
) {
2078 pa_sink_assert_ref(s
);
2079 pa_assert(!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
));
2081 if (s
->flags
& PA_SINK_DEFERRED_VOLUME
)
2082 pa_sink_assert_io_context(s
);
2084 pa_assert_ctl_context();
2087 pa_cvolume_reset(&s
->soft_volume
, s
->sample_spec
.channels
);
2089 s
->soft_volume
= *volume
;
2091 if (PA_SINK_IS_LINKED(s
->state
) && !(s
->flags
& PA_SINK_DEFERRED_VOLUME
))
2092 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_VOLUME
, NULL
, 0, NULL
) == 0);
2094 s
->thread_info
.soft_volume
= s
->soft_volume
;
2097 /* Called from the main thread. Only called for the root sink in volume sharing
2098 * cases, except for internal recursive calls. */
2099 static void propagate_real_volume(pa_sink
*s
, const pa_cvolume
*old_real_volume
) {
2103 pa_sink_assert_ref(s
);
2104 pa_assert(old_real_volume
);
2105 pa_assert_ctl_context();
2106 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2108 /* This is called when the hardware's real volume changes due to
2109 * some external event. We copy the real volume into our
2110 * reference volume and then rebuild the stream volumes based on
2111 * i->real_ratio which should stay fixed. */
2113 if (!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)) {
2114 if (pa_cvolume_equal(old_real_volume
, &s
->real_volume
))
2117 /* 1. Make the real volume the reference volume */
2118 update_reference_volume(s
, &s
->real_volume
, &s
->channel_map
, TRUE
);
2121 if (pa_sink_flat_volume_enabled(s
)) {
2123 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
2124 pa_cvolume old_volume
= i
->volume
;
2126 /* 2. Since the sink's reference and real volumes are equal
2127 * now our ratios should be too. */
2128 i
->reference_ratio
= i
->real_ratio
;
2130 /* 3. Recalculate the new stream reference volume based on the
2131 * reference ratio and the sink's reference volume.
2133 * This basically calculates:
2135 * i->volume = s->reference_volume * i->reference_ratio
2137 * This is identical to propagate_reference_volume() */
2138 i
->volume
= s
->reference_volume
;
2139 pa_cvolume_remap(&i
->volume
, &s
->channel_map
, &i
->channel_map
);
2140 pa_sw_cvolume_multiply(&i
->volume
, &i
->volume
, &i
->reference_ratio
);
2142 /* Notify if something changed */
2143 if (!pa_cvolume_equal(&old_volume
, &i
->volume
)) {
2145 if (i
->volume_changed
)
2146 i
->volume_changed(i
);
2148 pa_subscription_post(i
->core
, PA_SUBSCRIPTION_EVENT_SINK_INPUT
|PA_SUBSCRIPTION_EVENT_CHANGE
, i
->index
);
2151 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
))
2152 propagate_real_volume(i
->origin_sink
, old_real_volume
);
2156 /* Something got changed in the hardware. It probably makes sense
2157 * to save changed hw settings given that hw volume changes not
2158 * triggered by PA are almost certainly done by the user. */
2159 if (!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
))
2160 s
->save_volume
= TRUE
;
2163 /* Called from io thread */
2164 void pa_sink_update_volume_and_mute(pa_sink
*s
) {
2166 pa_sink_assert_io_context(s
);
2168 pa_asyncmsgq_post(pa_thread_mq_get()->outq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE
, NULL
, 0, NULL
, NULL
);
2171 /* Called from main thread */
2172 const pa_cvolume
*pa_sink_get_volume(pa_sink
*s
, pa_bool_t force_refresh
) {
2173 pa_sink_assert_ref(s
);
2174 pa_assert_ctl_context();
2175 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2177 if (s
->refresh_volume
|| force_refresh
) {
2178 struct pa_cvolume old_real_volume
;
2180 pa_assert(!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
));
2182 old_real_volume
= s
->real_volume
;
2184 if (!(s
->flags
& PA_SINK_DEFERRED_VOLUME
) && s
->get_volume
)
2187 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_VOLUME
, NULL
, 0, NULL
) == 0);
2189 update_real_volume(s
, &s
->real_volume
, &s
->channel_map
);
2190 propagate_real_volume(s
, &old_real_volume
);
2193 return &s
->reference_volume
;
2196 /* Called from main thread. In volume sharing cases, only the root sink may
2198 void pa_sink_volume_changed(pa_sink
*s
, const pa_cvolume
*new_real_volume
) {
2199 pa_cvolume old_real_volume
;
2201 pa_sink_assert_ref(s
);
2202 pa_assert_ctl_context();
2203 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2204 pa_assert(!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
));
2206 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2208 old_real_volume
= s
->real_volume
;
2209 update_real_volume(s
, new_real_volume
, &s
->channel_map
);
2210 propagate_real_volume(s
, &old_real_volume
);
2213 /* Called from main thread */
2214 void pa_sink_set_mute(pa_sink
*s
, pa_bool_t mute
, pa_bool_t save
) {
2215 pa_bool_t old_muted
;
2217 pa_sink_assert_ref(s
);
2218 pa_assert_ctl_context();
2219 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2221 old_muted
= s
->muted
;
2223 s
->save_muted
= (old_muted
== s
->muted
&& s
->save_muted
) || save
;
2225 if (!(s
->flags
& PA_SINK_DEFERRED_VOLUME
) && s
->set_mute
)
2228 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_MUTE
, NULL
, 0, NULL
) == 0);
2230 if (old_muted
!= s
->muted
)
2231 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
2234 /* Called from main thread */
2235 pa_bool_t
pa_sink_get_mute(pa_sink
*s
, pa_bool_t force_refresh
) {
2237 pa_sink_assert_ref(s
);
2238 pa_assert_ctl_context();
2239 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2241 if (s
->refresh_muted
|| force_refresh
) {
2242 pa_bool_t old_muted
= s
->muted
;
2244 if (!(s
->flags
& PA_SINK_DEFERRED_VOLUME
) && s
->get_mute
)
2247 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_MUTE
, NULL
, 0, NULL
) == 0);
2249 if (old_muted
!= s
->muted
) {
2250 s
->save_muted
= TRUE
;
2252 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
2254 /* Make sure the soft mute status stays in sync */
2255 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_MUTE
, NULL
, 0, NULL
) == 0);
2262 /* Called from main thread */
2263 void pa_sink_mute_changed(pa_sink
*s
, pa_bool_t new_muted
) {
2264 pa_sink_assert_ref(s
);
2265 pa_assert_ctl_context();
2266 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2268 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2270 if (s
->muted
== new_muted
)
2273 s
->muted
= new_muted
;
2274 s
->save_muted
= TRUE
;
2276 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
2279 /* Called from main thread */
2280 pa_bool_t
pa_sink_update_proplist(pa_sink
*s
, pa_update_mode_t mode
, pa_proplist
*p
) {
2281 pa_sink_assert_ref(s
);
2282 pa_assert_ctl_context();
2285 pa_proplist_update(s
->proplist
, mode
, p
);
2287 if (PA_SINK_IS_LINKED(s
->state
)) {
2288 pa_hook_fire(&s
->core
->hooks
[PA_CORE_HOOK_SINK_PROPLIST_CHANGED
], s
);
2289 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
2295 /* Called from main thread */
2296 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
2297 void pa_sink_set_description(pa_sink
*s
, const char *description
) {
2299 pa_sink_assert_ref(s
);
2300 pa_assert_ctl_context();
2302 if (!description
&& !pa_proplist_contains(s
->proplist
, PA_PROP_DEVICE_DESCRIPTION
))
2305 old
= pa_proplist_gets(s
->proplist
, PA_PROP_DEVICE_DESCRIPTION
);
2307 if (old
&& description
&& pa_streq(old
, description
))
2311 pa_proplist_sets(s
->proplist
, PA_PROP_DEVICE_DESCRIPTION
, description
);
2313 pa_proplist_unset(s
->proplist
, PA_PROP_DEVICE_DESCRIPTION
);
2315 if (s
->monitor_source
) {
2318 n
= pa_sprintf_malloc("Monitor Source of %s", description
? description
: s
->name
);
2319 pa_source_set_description(s
->monitor_source
, n
);
2323 if (PA_SINK_IS_LINKED(s
->state
)) {
2324 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
2325 pa_hook_fire(&s
->core
->hooks
[PA_CORE_HOOK_SINK_PROPLIST_CHANGED
], s
);
2329 /* Called from main thread */
2330 unsigned pa_sink_linked_by(pa_sink
*s
) {
2333 pa_sink_assert_ref(s
);
2334 pa_assert_ctl_context();
2335 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2337 ret
= pa_idxset_size(s
->inputs
);
2339 /* We add in the number of streams connected to us here. Please
2340 * note the asymmetry to pa_sink_used_by()! */
2342 if (s
->monitor_source
)
2343 ret
+= pa_source_linked_by(s
->monitor_source
);
2348 /* Called from main thread */
2349 unsigned pa_sink_used_by(pa_sink
*s
) {
2352 pa_sink_assert_ref(s
);
2353 pa_assert_ctl_context();
2354 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2356 ret
= pa_idxset_size(s
->inputs
);
2357 pa_assert(ret
>= s
->n_corked
);
2359 /* Streams connected to our monitor source do not matter for
2360 * pa_sink_used_by()!.*/
2362 return ret
- s
->n_corked
;
2365 /* Called from main thread */
2366 unsigned pa_sink_check_suspend(pa_sink
*s
) {
2371 pa_sink_assert_ref(s
);
2372 pa_assert_ctl_context();
2374 if (!PA_SINK_IS_LINKED(s
->state
))
2379 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
2380 pa_sink_input_state_t st
;
2382 st
= pa_sink_input_get_state(i
);
2384 /* We do not assert here. It is perfectly valid for a sink input to
2385 * be in the INIT state (i.e. created, marked done but not yet put)
2386 * and we should not care if it's unlinked as it won't contribute
2387 * towards our busy status.
2389 if (!PA_SINK_INPUT_IS_LINKED(st
))
2392 if (st
== PA_SINK_INPUT_CORKED
)
2395 if (i
->flags
& PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND
)
2401 if (s
->monitor_source
)
2402 ret
+= pa_source_check_suspend(s
->monitor_source
);
2407 /* Called from the IO thread */
2408 static void sync_input_volumes_within_thread(pa_sink
*s
) {
2412 pa_sink_assert_ref(s
);
2413 pa_sink_assert_io_context(s
);
2415 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
) {
2416 if (pa_cvolume_equal(&i
->thread_info
.soft_volume
, &i
->soft_volume
))
2419 i
->thread_info
.soft_volume
= i
->soft_volume
;
2420 pa_sink_input_request_rewind(i
, 0, TRUE
, FALSE
, FALSE
);
2424 /* Called from the IO thread. Only called for the root sink in volume sharing
2425 * cases, except for internal recursive calls. */
2426 static void set_shared_volume_within_thread(pa_sink
*s
) {
2427 pa_sink_input
*i
= NULL
;
2430 pa_sink_assert_ref(s
);
2432 PA_MSGOBJECT(s
)->process_msg(PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_VOLUME_SYNCED
, NULL
, 0, NULL
);
2434 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
) {
2435 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
))
2436 set_shared_volume_within_thread(i
->origin_sink
);
2440 /* Called from IO thread, except when it is not */
2441 int pa_sink_process_msg(pa_msgobject
*o
, int code
, void *userdata
, int64_t offset
, pa_memchunk
*chunk
) {
2442 pa_sink
*s
= PA_SINK(o
);
2443 pa_sink_assert_ref(s
);
2445 switch ((pa_sink_message_t
) code
) {
2447 case PA_SINK_MESSAGE_ADD_INPUT
: {
2448 pa_sink_input
*i
= PA_SINK_INPUT(userdata
);
2450 /* If you change anything here, make sure to change the
2451 * sink input handling a few lines down at
2452 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2454 pa_hashmap_put(s
->thread_info
.inputs
, PA_UINT32_TO_PTR(i
->index
), pa_sink_input_ref(i
));
2456 /* Since the caller sleeps in pa_sink_input_put(), we can
2457 * safely access data outside of thread_info even though
2460 if ((i
->thread_info
.sync_prev
= i
->sync_prev
)) {
2461 pa_assert(i
->sink
== i
->thread_info
.sync_prev
->sink
);
2462 pa_assert(i
->sync_prev
->sync_next
== i
);
2463 i
->thread_info
.sync_prev
->thread_info
.sync_next
= i
;
2466 if ((i
->thread_info
.sync_next
= i
->sync_next
)) {
2467 pa_assert(i
->sink
== i
->thread_info
.sync_next
->sink
);
2468 pa_assert(i
->sync_next
->sync_prev
== i
);
2469 i
->thread_info
.sync_next
->thread_info
.sync_prev
= i
;
2472 pa_assert(!i
->thread_info
.attached
);
2473 i
->thread_info
.attached
= TRUE
;
2478 pa_sink_input_set_state_within_thread(i
, i
->state
);
2480 /* The requested latency of the sink input needs to be fixed up and
2481 * then configured on the sink. If this causes the sink latency to
2482 * go down, the sink implementor is responsible for doing a rewind
2483 * in the update_requested_latency() callback to ensure that the
2484 * sink buffer doesn't contain more data than what the new latency
2487 * XXX: Does it really make sense to push this responsibility to
2488 * the sink implementors? Wouldn't it be better to do it once in
2489 * the core than many times in the modules? */
2491 if (i
->thread_info
.requested_sink_latency
!= (pa_usec_t
) -1)
2492 pa_sink_input_set_requested_latency_within_thread(i
, i
->thread_info
.requested_sink_latency
);
2494 pa_sink_input_update_max_rewind(i
, s
->thread_info
.max_rewind
);
2495 pa_sink_input_update_max_request(i
, s
->thread_info
.max_request
);
2497 /* We don't rewind here automatically. This is left to the
2498 * sink input implementor because some sink inputs need a
2499 * slow start, i.e. need some time to buffer client
2500 * samples before beginning streaming.
2502 * XXX: Does it really make sense to push this functionality to
2503 * the sink implementors? Wouldn't it be better to do it once in
2504 * the core than many times in the modules? */
2506 /* In flat volume mode we need to update the volume as
2508 return o
->process_msg(o
, PA_SINK_MESSAGE_SET_SHARED_VOLUME
, NULL
, 0, NULL
);
2511 case PA_SINK_MESSAGE_REMOVE_INPUT
: {
2512 pa_sink_input
*i
= PA_SINK_INPUT(userdata
);
2514 /* If you change anything here, make sure to change the
2515 * sink input handling a few lines down at
2516 * PA_SINK_MESSAGE_START_MOVE, too. */
2521 pa_sink_input_set_state_within_thread(i
, i
->state
);
2523 pa_assert(i
->thread_info
.attached
);
2524 i
->thread_info
.attached
= FALSE
;
2526 /* Since the caller sleeps in pa_sink_input_unlink(),
2527 * we can safely access data outside of thread_info even
2528 * though it is mutable */
2530 pa_assert(!i
->sync_prev
);
2531 pa_assert(!i
->sync_next
);
2533 if (i
->thread_info
.sync_prev
) {
2534 i
->thread_info
.sync_prev
->thread_info
.sync_next
= i
->thread_info
.sync_prev
->sync_next
;
2535 i
->thread_info
.sync_prev
= NULL
;
2538 if (i
->thread_info
.sync_next
) {
2539 i
->thread_info
.sync_next
->thread_info
.sync_prev
= i
->thread_info
.sync_next
->sync_prev
;
2540 i
->thread_info
.sync_next
= NULL
;
2543 if (pa_hashmap_remove(s
->thread_info
.inputs
, PA_UINT32_TO_PTR(i
->index
)))
2544 pa_sink_input_unref(i
);
2546 pa_sink_invalidate_requested_latency(s
, TRUE
);
2547 pa_sink_request_rewind(s
, (size_t) -1);
2549 /* In flat volume mode we need to update the volume as
2551 return o
->process_msg(o
, PA_SINK_MESSAGE_SET_SHARED_VOLUME
, NULL
, 0, NULL
);
2554 case PA_SINK_MESSAGE_START_MOVE
: {
2555 pa_sink_input
*i
= PA_SINK_INPUT(userdata
);
2557 /* We don't support moving synchronized streams. */
2558 pa_assert(!i
->sync_prev
);
2559 pa_assert(!i
->sync_next
);
2560 pa_assert(!i
->thread_info
.sync_next
);
2561 pa_assert(!i
->thread_info
.sync_prev
);
2563 if (i
->thread_info
.state
!= PA_SINK_INPUT_CORKED
) {
2565 size_t sink_nbytes
, total_nbytes
;
2567 /* The old sink probably has some audio from this
2568 * stream in its buffer. We want to "take it back" as
2569 * much as possible and play it to the new sink. We
2570 * don't know at this point how much the old sink can
2571 * rewind. We have to pick something, and that
2572 * something is the full latency of the old sink here.
2573 * So we rewind the stream buffer by the sink latency
2574 * amount, which may be more than what we should
2575 * rewind. This can result in a chunk of audio being
2576 * played both to the old sink and the new sink.
2578 * FIXME: Fix this code so that we don't have to make
2579 * guesses about how much the sink will actually be
2580 * able to rewind. If someone comes up with a solution
2581 * for this, something to note is that the part of the
2582 * latency that the old sink couldn't rewind should
2583 * ideally be compensated after the stream has moved
2584 * to the new sink by adding silence. The new sink
2585 * most likely can't start playing the moved stream
2586 * immediately, and that gap should be removed from
2587 * the "compensation silence" (at least at the time of
2588 * writing this, the move finish code will actually
2589 * already take care of dropping the new sink's
2590 * unrewindable latency, so taking into account the
2591 * unrewindable latency of the old sink is the only
2594 * The render_memblockq contents are discarded,
2595 * because when the sink changes, the format of the
2596 * audio stored in the render_memblockq may change
2597 * too, making the stored audio invalid. FIXME:
2598 * However, the read and write indices are moved back
2599 * the same amount, so if they are not the same now,
2600 * they won't be the same after the rewind either. If
2601 * the write index of the render_memblockq is ahead of
2602 * the read index, then the render_memblockq will feed
2603 * the new sink some silence first, which it shouldn't
2604 * do. The write index should be flushed to be the
2605 * same as the read index. */
2607 /* Get the latency of the sink */
2608 usec
= pa_sink_get_latency_within_thread(s
);
2609 sink_nbytes
= pa_usec_to_bytes(usec
, &s
->sample_spec
);
2610 total_nbytes
= sink_nbytes
+ pa_memblockq_get_length(i
->thread_info
.render_memblockq
);
2612 if (total_nbytes
> 0) {
2613 i
->thread_info
.rewrite_nbytes
= i
->thread_info
.resampler
? pa_resampler_request(i
->thread_info
.resampler
, total_nbytes
) : total_nbytes
;
2614 i
->thread_info
.rewrite_flush
= TRUE
;
2615 pa_sink_input_process_rewind(i
, sink_nbytes
);
2622 pa_assert(i
->thread_info
.attached
);
2623 i
->thread_info
.attached
= FALSE
;
2625 /* Let's remove the sink input ...*/
2626 if (pa_hashmap_remove(s
->thread_info
.inputs
, PA_UINT32_TO_PTR(i
->index
)))
2627 pa_sink_input_unref(i
);
2629 pa_sink_invalidate_requested_latency(s
, TRUE
);
2631 pa_log_debug("Requesting rewind due to started move");
2632 pa_sink_request_rewind(s
, (size_t) -1);
2634 /* In flat volume mode we need to update the volume as
2636 return o
->process_msg(o
, PA_SINK_MESSAGE_SET_SHARED_VOLUME
, NULL
, 0, NULL
);
2639 case PA_SINK_MESSAGE_FINISH_MOVE
: {
2640 pa_sink_input
*i
= PA_SINK_INPUT(userdata
);
2642 /* We don't support moving synchronized streams. */
2643 pa_assert(!i
->sync_prev
);
2644 pa_assert(!i
->sync_next
);
2645 pa_assert(!i
->thread_info
.sync_next
);
2646 pa_assert(!i
->thread_info
.sync_prev
);
2648 pa_hashmap_put(s
->thread_info
.inputs
, PA_UINT32_TO_PTR(i
->index
), pa_sink_input_ref(i
));
2650 pa_assert(!i
->thread_info
.attached
);
2651 i
->thread_info
.attached
= TRUE
;
2656 if (i
->thread_info
.state
!= PA_SINK_INPUT_CORKED
) {
2660 /* In the ideal case the new sink would start playing
2661 * the stream immediately. That requires the sink to
2662 * be able to rewind all of its latency, which usually
2663 * isn't possible, so there will probably be some gap
2664 * before the moved stream becomes audible. We then
2665 * have two possibilities: 1) start playing the stream
2666 * from where it is now, or 2) drop the unrewindable
2667 * latency of the sink from the stream. With option 1
2668 * we won't lose any audio but the stream will have a
2669 * pause. With option 2 we may lose some audio but the
2670 * stream time will be somewhat in sync with the wall
2671 * clock. Lennart seems to have chosen option 2 (one
2672 * of the reasons might have been that option 1 is
2673 * actually much harder to implement), so we drop the
2674 * latency of the new sink from the moved stream and
2675 * hope that the sink will undo most of that in the
2678 /* Get the latency of the sink */
2679 usec
= pa_sink_get_latency_within_thread(s
);
2680 nbytes
= pa_usec_to_bytes(usec
, &s
->sample_spec
);
2683 pa_sink_input_drop(i
, nbytes
);
2685 pa_log_debug("Requesting rewind due to finished move");
2686 pa_sink_request_rewind(s
, nbytes
);
2689 /* Updating the requested sink latency has to be done
2690 * after the sink rewind request, not before, because
2691 * otherwise the sink may limit the rewind amount
2694 if (i
->thread_info
.requested_sink_latency
!= (pa_usec_t
) -1)
2695 pa_sink_input_set_requested_latency_within_thread(i
, i
->thread_info
.requested_sink_latency
);
2697 pa_sink_input_update_max_rewind(i
, s
->thread_info
.max_rewind
);
2698 pa_sink_input_update_max_request(i
, s
->thread_info
.max_request
);
2700 return o
->process_msg(o
, PA_SINK_MESSAGE_SET_SHARED_VOLUME
, NULL
, 0, NULL
);
2703 case PA_SINK_MESSAGE_SET_SHARED_VOLUME
: {
2704 pa_sink
*root_sink
= pa_sink_get_master(s
);
2706 if (PA_LIKELY(root_sink
))
2707 set_shared_volume_within_thread(root_sink
);
2712 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED
:
2714 if (s
->flags
& PA_SINK_DEFERRED_VOLUME
) {
2716 pa_sink_volume_change_push(s
);
2718 /* Fall through ... */
2720 case PA_SINK_MESSAGE_SET_VOLUME
:
2722 if (!pa_cvolume_equal(&s
->thread_info
.soft_volume
, &s
->soft_volume
)) {
2723 s
->thread_info
.soft_volume
= s
->soft_volume
;
2724 pa_sink_request_rewind(s
, (size_t) -1);
2727 /* Fall through ... */
2729 case PA_SINK_MESSAGE_SYNC_VOLUMES
:
2730 sync_input_volumes_within_thread(s
);
2733 case PA_SINK_MESSAGE_GET_VOLUME
:
2735 if ((s
->flags
& PA_SINK_DEFERRED_VOLUME
) && s
->get_volume
) {
2737 pa_sink_volume_change_flush(s
);
2738 pa_sw_cvolume_divide(&s
->thread_info
.current_hw_volume
, &s
->real_volume
, &s
->soft_volume
);
2741 /* In case sink implementor reset SW volume. */
2742 if (!pa_cvolume_equal(&s
->thread_info
.soft_volume
, &s
->soft_volume
)) {
2743 s
->thread_info
.soft_volume
= s
->soft_volume
;
2744 pa_sink_request_rewind(s
, (size_t) -1);
2749 case PA_SINK_MESSAGE_SET_MUTE
:
2751 if (s
->thread_info
.soft_muted
!= s
->muted
) {
2752 s
->thread_info
.soft_muted
= s
->muted
;
2753 pa_sink_request_rewind(s
, (size_t) -1);
2756 if (s
->flags
& PA_SINK_DEFERRED_VOLUME
&& s
->set_mute
)
2761 case PA_SINK_MESSAGE_GET_MUTE
:
2763 if (s
->flags
& PA_SINK_DEFERRED_VOLUME
&& s
->get_mute
)
2768 case PA_SINK_MESSAGE_SET_STATE
: {
2770 pa_bool_t suspend_change
=
2771 (s
->thread_info
.state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata
))) ||
2772 (PA_SINK_IS_OPENED(s
->thread_info
.state
) && PA_PTR_TO_UINT(userdata
) == PA_SINK_SUSPENDED
);
2774 s
->thread_info
.state
= PA_PTR_TO_UINT(userdata
);
2776 if (s
->thread_info
.state
== PA_SINK_SUSPENDED
) {
2777 s
->thread_info
.rewind_nbytes
= 0;
2778 s
->thread_info
.rewind_requested
= FALSE
;
2781 if (suspend_change
) {
2785 while ((i
= pa_hashmap_iterate(s
->thread_info
.inputs
, &state
, NULL
)))
2786 if (i
->suspend_within_thread
)
2787 i
->suspend_within_thread(i
, s
->thread_info
.state
== PA_SINK_SUSPENDED
);
2793 case PA_SINK_MESSAGE_DETACH
:
2795 /* Detach all streams */
2796 pa_sink_detach_within_thread(s
);
2799 case PA_SINK_MESSAGE_ATTACH
:
2801 /* Reattach all streams */
2802 pa_sink_attach_within_thread(s
);
2805 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY
: {
2807 pa_usec_t
*usec
= userdata
;
2808 *usec
= pa_sink_get_requested_latency_within_thread(s
);
2810 /* Yes, that's right, the IO thread will see -1 when no
2811 * explicit requested latency is configured, the main
2812 * thread will see max_latency */
2813 if (*usec
== (pa_usec_t
) -1)
2814 *usec
= s
->thread_info
.max_latency
;
2819 case PA_SINK_MESSAGE_SET_LATENCY_RANGE
: {
2820 pa_usec_t
*r
= userdata
;
2822 pa_sink_set_latency_range_within_thread(s
, r
[0], r
[1]);
2827 case PA_SINK_MESSAGE_GET_LATENCY_RANGE
: {
2828 pa_usec_t
*r
= userdata
;
2830 r
[0] = s
->thread_info
.min_latency
;
2831 r
[1] = s
->thread_info
.max_latency
;
2836 case PA_SINK_MESSAGE_GET_FIXED_LATENCY
:
2838 *((pa_usec_t
*) userdata
) = s
->thread_info
.fixed_latency
;
2841 case PA_SINK_MESSAGE_SET_FIXED_LATENCY
:
2843 pa_sink_set_fixed_latency_within_thread(s
, (pa_usec_t
) offset
);
2846 case PA_SINK_MESSAGE_GET_MAX_REWIND
:
2848 *((size_t*) userdata
) = s
->thread_info
.max_rewind
;
2851 case PA_SINK_MESSAGE_GET_MAX_REQUEST
:
2853 *((size_t*) userdata
) = s
->thread_info
.max_request
;
2856 case PA_SINK_MESSAGE_SET_MAX_REWIND
:
2858 pa_sink_set_max_rewind_within_thread(s
, (size_t) offset
);
2861 case PA_SINK_MESSAGE_SET_MAX_REQUEST
:
2863 pa_sink_set_max_request_within_thread(s
, (size_t) offset
);
2866 case PA_SINK_MESSAGE_SET_PORT
:
2868 pa_assert(userdata
);
2870 struct sink_message_set_port
*msg_data
= userdata
;
2871 msg_data
->ret
= s
->set_port(s
, msg_data
->port
);
2875 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE
:
2876 /* This message is sent from IO-thread and handled in main thread. */
2877 pa_assert_ctl_context();
2879 /* Make sure we're not messing with main thread when no longer linked */
2880 if (!PA_SINK_IS_LINKED(s
->state
))
2883 pa_sink_get_volume(s
, TRUE
);
2884 pa_sink_get_mute(s
, TRUE
);
2887 case PA_SINK_MESSAGE_SET_LATENCY_OFFSET
:
2888 s
->thread_info
.latency_offset
= offset
;
2891 case PA_SINK_MESSAGE_GET_LATENCY
:
2892 case PA_SINK_MESSAGE_MAX
:
2899 /* Called from main thread */
2900 int pa_sink_suspend_all(pa_core
*c
, pa_bool_t suspend
, pa_suspend_cause_t cause
) {
2905 pa_core_assert_ref(c
);
2906 pa_assert_ctl_context();
2907 pa_assert(cause
!= 0);
2909 PA_IDXSET_FOREACH(sink
, c
->sinks
, idx
) {
2912 if ((r
= pa_sink_suspend(sink
, suspend
, cause
)) < 0)
/* Called from main thread */
/* Synchronously asks the IO thread to detach all of this sink's streams
 * (handled by PA_SINK_MESSAGE_DETACH -> pa_sink_detach_within_thread()). */
void pa_sink_detach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Blocking send: returns only after the IO thread processed the message. */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
/* Called from main thread */
/* Synchronously asks the IO thread to reattach all of this sink's streams
 * (handled by PA_SINK_MESSAGE_ATTACH -> pa_sink_attach_within_thread()). */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Blocking send: returns only after the IO thread processed the message. */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2937 /* Called from IO thread */
2938 void pa_sink_detach_within_thread(pa_sink
*s
) {
2942 pa_sink_assert_ref(s
);
2943 pa_sink_assert_io_context(s
);
2944 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
2946 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
2950 if (s
->monitor_source
)
2951 pa_source_detach_within_thread(s
->monitor_source
);
2954 /* Called from IO thread */
2955 void pa_sink_attach_within_thread(pa_sink
*s
) {
2959 pa_sink_assert_ref(s
);
2960 pa_sink_assert_io_context(s
);
2961 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
2963 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
2967 if (s
->monitor_source
)
2968 pa_source_attach_within_thread(s
->monitor_source
);
2971 /* Called from IO thread */
2972 void pa_sink_request_rewind(pa_sink
*s
, size_t nbytes
) {
2973 pa_sink_assert_ref(s
);
2974 pa_sink_assert_io_context(s
);
2975 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
2977 if (nbytes
== (size_t) -1)
2978 nbytes
= s
->thread_info
.max_rewind
;
2980 nbytes
= PA_MIN(nbytes
, s
->thread_info
.max_rewind
);
2982 if (s
->thread_info
.rewind_requested
&&
2983 nbytes
<= s
->thread_info
.rewind_nbytes
)
2986 s
->thread_info
.rewind_nbytes
= nbytes
;
2987 s
->thread_info
.rewind_requested
= TRUE
;
2989 if (s
->request_rewind
)
2990 s
->request_rewind(s
);
2993 /* Called from IO thread */
2994 pa_usec_t
pa_sink_get_requested_latency_within_thread(pa_sink
*s
) {
2995 pa_usec_t result
= (pa_usec_t
) -1;
2998 pa_usec_t monitor_latency
;
3000 pa_sink_assert_ref(s
);
3001 pa_sink_assert_io_context(s
);
3003 if (!(s
->flags
& PA_SINK_DYNAMIC_LATENCY
))
3004 return PA_CLAMP(s
->thread_info
.fixed_latency
, s
->thread_info
.min_latency
, s
->thread_info
.max_latency
);
3006 if (s
->thread_info
.requested_latency_valid
)
3007 return s
->thread_info
.requested_latency
;
3009 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
3010 if (i
->thread_info
.requested_sink_latency
!= (pa_usec_t
) -1 &&
3011 (result
== (pa_usec_t
) -1 || result
> i
->thread_info
.requested_sink_latency
))
3012 result
= i
->thread_info
.requested_sink_latency
;
3014 monitor_latency
= pa_source_get_requested_latency_within_thread(s
->monitor_source
);
3016 if (monitor_latency
!= (pa_usec_t
) -1 &&
3017 (result
== (pa_usec_t
) -1 || result
> monitor_latency
))
3018 result
= monitor_latency
;
3020 if (result
!= (pa_usec_t
) -1)
3021 result
= PA_CLAMP(result
, s
->thread_info
.min_latency
, s
->thread_info
.max_latency
);
3023 if (PA_SINK_IS_LINKED(s
->thread_info
.state
)) {
3024 /* Only cache if properly initialized */
3025 s
->thread_info
.requested_latency
= result
;
3026 s
->thread_info
.requested_latency_valid
= TRUE
;
3032 /* Called from main thread */
3033 pa_usec_t
pa_sink_get_requested_latency(pa_sink
*s
) {
3036 pa_sink_assert_ref(s
);
3037 pa_assert_ctl_context();
3038 pa_assert(PA_SINK_IS_LINKED(s
->state
));
3040 if (s
->state
== PA_SINK_SUSPENDED
)
3043 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY
, &usec
, 0, NULL
) == 0);
3048 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3049 void pa_sink_set_max_rewind_within_thread(pa_sink
*s
, size_t max_rewind
) {
3053 pa_sink_assert_ref(s
);
3054 pa_sink_assert_io_context(s
);
3056 if (max_rewind
== s
->thread_info
.max_rewind
)
3059 s
->thread_info
.max_rewind
= max_rewind
;
3061 if (PA_SINK_IS_LINKED(s
->thread_info
.state
))
3062 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
3063 pa_sink_input_update_max_rewind(i
, s
->thread_info
.max_rewind
);
3065 if (s
->monitor_source
)
3066 pa_source_set_max_rewind_within_thread(s
->monitor_source
, s
->thread_info
.max_rewind
);
3069 /* Called from main thread */
3070 void pa_sink_set_max_rewind(pa_sink
*s
, size_t max_rewind
) {
3071 pa_sink_assert_ref(s
);
3072 pa_assert_ctl_context();
3074 if (PA_SINK_IS_LINKED(s
->state
))
3075 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_MAX_REWIND
, NULL
, max_rewind
, NULL
) == 0);
3077 pa_sink_set_max_rewind_within_thread(s
, max_rewind
);
3080 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
3081 void pa_sink_set_max_request_within_thread(pa_sink
*s
, size_t max_request
) {
3084 pa_sink_assert_ref(s
);
3085 pa_sink_assert_io_context(s
);
3087 if (max_request
== s
->thread_info
.max_request
)
3090 s
->thread_info
.max_request
= max_request
;
3092 if (PA_SINK_IS_LINKED(s
->thread_info
.state
)) {
3095 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
3096 pa_sink_input_update_max_request(i
, s
->thread_info
.max_request
);
3100 /* Called from main thread */
3101 void pa_sink_set_max_request(pa_sink
*s
, size_t max_request
) {
3102 pa_sink_assert_ref(s
);
3103 pa_assert_ctl_context();
3105 if (PA_SINK_IS_LINKED(s
->state
))
3106 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_MAX_REQUEST
, NULL
, max_request
, NULL
) == 0);
3108 pa_sink_set_max_request_within_thread(s
, max_request
);
3111 /* Called from IO thread */
3112 void pa_sink_invalidate_requested_latency(pa_sink
*s
, pa_bool_t dynamic
) {
3116 pa_sink_assert_ref(s
);
3117 pa_sink_assert_io_context(s
);
3119 if ((s
->flags
& PA_SINK_DYNAMIC_LATENCY
))
3120 s
->thread_info
.requested_latency_valid
= FALSE
;
3124 if (PA_SINK_IS_LINKED(s
->thread_info
.state
)) {
3126 if (s
->update_requested_latency
)
3127 s
->update_requested_latency(s
);
3129 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
3130 if (i
->update_sink_requested_latency
)
3131 i
->update_sink_requested_latency(i
);
3135 /* Called from main thread */
3136 void pa_sink_set_latency_range(pa_sink
*s
, pa_usec_t min_latency
, pa_usec_t max_latency
) {
3137 pa_sink_assert_ref(s
);
3138 pa_assert_ctl_context();
3140 /* min_latency == 0: no limit
3141 * min_latency anything else: specified limit
3143 * Similar for max_latency */
3145 if (min_latency
< ABSOLUTE_MIN_LATENCY
)
3146 min_latency
= ABSOLUTE_MIN_LATENCY
;
3148 if (max_latency
<= 0 ||
3149 max_latency
> ABSOLUTE_MAX_LATENCY
)
3150 max_latency
= ABSOLUTE_MAX_LATENCY
;
3152 pa_assert(min_latency
<= max_latency
);
3154 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3155 pa_assert((min_latency
== ABSOLUTE_MIN_LATENCY
&&
3156 max_latency
== ABSOLUTE_MAX_LATENCY
) ||
3157 (s
->flags
& PA_SINK_DYNAMIC_LATENCY
));
3159 if (PA_SINK_IS_LINKED(s
->state
)) {
3165 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_LATENCY_RANGE
, r
, 0, NULL
) == 0);
3167 pa_sink_set_latency_range_within_thread(s
, min_latency
, max_latency
);
3170 /* Called from main thread */
3171 void pa_sink_get_latency_range(pa_sink
*s
, pa_usec_t
*min_latency
, pa_usec_t
*max_latency
) {
3172 pa_sink_assert_ref(s
);
3173 pa_assert_ctl_context();
3174 pa_assert(min_latency
);
3175 pa_assert(max_latency
);
3177 if (PA_SINK_IS_LINKED(s
->state
)) {
3178 pa_usec_t r
[2] = { 0, 0 };
3180 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_LATENCY_RANGE
, r
, 0, NULL
) == 0);
3182 *min_latency
= r
[0];
3183 *max_latency
= r
[1];
3185 *min_latency
= s
->thread_info
.min_latency
;
3186 *max_latency
= s
->thread_info
.max_latency
;
3190 /* Called from IO thread */
3191 void pa_sink_set_latency_range_within_thread(pa_sink
*s
, pa_usec_t min_latency
, pa_usec_t max_latency
) {
3192 pa_sink_assert_ref(s
);
3193 pa_sink_assert_io_context(s
);
3195 pa_assert(min_latency
>= ABSOLUTE_MIN_LATENCY
);
3196 pa_assert(max_latency
<= ABSOLUTE_MAX_LATENCY
);
3197 pa_assert(min_latency
<= max_latency
);
3199 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
3200 pa_assert((min_latency
== ABSOLUTE_MIN_LATENCY
&&
3201 max_latency
== ABSOLUTE_MAX_LATENCY
) ||
3202 (s
->flags
& PA_SINK_DYNAMIC_LATENCY
));
3204 if (s
->thread_info
.min_latency
== min_latency
&&
3205 s
->thread_info
.max_latency
== max_latency
)
3208 s
->thread_info
.min_latency
= min_latency
;
3209 s
->thread_info
.max_latency
= max_latency
;
3211 if (PA_SINK_IS_LINKED(s
->thread_info
.state
)) {
3215 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
3216 if (i
->update_sink_latency_range
)
3217 i
->update_sink_latency_range(i
);
3220 pa_sink_invalidate_requested_latency(s
, FALSE
);
3222 pa_source_set_latency_range_within_thread(s
->monitor_source
, min_latency
, max_latency
);
3225 /* Called from main thread */
3226 void pa_sink_set_fixed_latency(pa_sink
*s
, pa_usec_t latency
) {
3227 pa_sink_assert_ref(s
);
3228 pa_assert_ctl_context();
3230 if (s
->flags
& PA_SINK_DYNAMIC_LATENCY
) {
3231 pa_assert(latency
== 0);
3235 if (latency
< ABSOLUTE_MIN_LATENCY
)
3236 latency
= ABSOLUTE_MIN_LATENCY
;
3238 if (latency
> ABSOLUTE_MAX_LATENCY
)
3239 latency
= ABSOLUTE_MAX_LATENCY
;
3241 if (PA_SINK_IS_LINKED(s
->state
))
3242 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_FIXED_LATENCY
, NULL
, (int64_t) latency
, NULL
) == 0);
3244 s
->thread_info
.fixed_latency
= latency
;
3246 pa_source_set_fixed_latency(s
->monitor_source
, latency
);
3249 /* Called from main thread */
3250 pa_usec_t
pa_sink_get_fixed_latency(pa_sink
*s
) {
3253 pa_sink_assert_ref(s
);
3254 pa_assert_ctl_context();
3256 if (s
->flags
& PA_SINK_DYNAMIC_LATENCY
)
3259 if (PA_SINK_IS_LINKED(s
->state
))
3260 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_FIXED_LATENCY
, &latency
, 0, NULL
) == 0);
3262 latency
= s
->thread_info
.fixed_latency
;
3267 /* Called from IO thread */
3268 void pa_sink_set_fixed_latency_within_thread(pa_sink
*s
, pa_usec_t latency
) {
3269 pa_sink_assert_ref(s
);
3270 pa_sink_assert_io_context(s
);
3272 if (s
->flags
& PA_SINK_DYNAMIC_LATENCY
) {
3273 pa_assert(latency
== 0);
3274 s
->thread_info
.fixed_latency
= 0;
3276 if (s
->monitor_source
)
3277 pa_source_set_fixed_latency_within_thread(s
->monitor_source
, 0);
3282 pa_assert(latency
>= ABSOLUTE_MIN_LATENCY
);
3283 pa_assert(latency
<= ABSOLUTE_MAX_LATENCY
);
3285 if (s
->thread_info
.fixed_latency
== latency
)
3288 s
->thread_info
.fixed_latency
= latency
;
3290 if (PA_SINK_IS_LINKED(s
->thread_info
.state
)) {
3294 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
3295 if (i
->update_sink_fixed_latency
)
3296 i
->update_sink_fixed_latency(i
);
3299 pa_sink_invalidate_requested_latency(s
, FALSE
);
3301 pa_source_set_fixed_latency_within_thread(s
->monitor_source
, latency
);
3304 /* Called from main context */
3305 void pa_sink_set_latency_offset(pa_sink
*s
, int64_t offset
) {
3306 pa_sink_assert_ref(s
);
3308 s
->latency_offset
= offset
;
3310 if (PA_SINK_IS_LINKED(s
->state
))
3311 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_LATENCY_OFFSET
, NULL
, offset
, NULL
) == 0);
3313 s
->thread_info
.latency_offset
= offset
;
3316 /* Called from main context */
3317 size_t pa_sink_get_max_rewind(pa_sink
*s
) {
3319 pa_assert_ctl_context();
3320 pa_sink_assert_ref(s
);
3322 if (!PA_SINK_IS_LINKED(s
->state
))
3323 return s
->thread_info
.max_rewind
;
3325 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_MAX_REWIND
, &r
, 0, NULL
) == 0);
3330 /* Called from main context */
3331 size_t pa_sink_get_max_request(pa_sink
*s
) {
3333 pa_sink_assert_ref(s
);
3334 pa_assert_ctl_context();
3336 if (!PA_SINK_IS_LINKED(s
->state
))
3337 return s
->thread_info
.max_request
;
3339 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_MAX_REQUEST
, &r
, 0, NULL
) == 0);
3344 /* Called from main context */
3345 int pa_sink_set_port(pa_sink
*s
, const char *name
, pa_bool_t save
) {
3346 pa_device_port
*port
;
3349 pa_sink_assert_ref(s
);
3350 pa_assert_ctl_context();
3353 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s
->index
, s
->name
);
3354 return -PA_ERR_NOTIMPLEMENTED
;
3358 return -PA_ERR_NOENTITY
;
3360 if (!(port
= pa_hashmap_get(s
->ports
, name
)))
3361 return -PA_ERR_NOENTITY
;
3363 if (s
->active_port
== port
) {
3364 s
->save_port
= s
->save_port
|| save
;
3368 if (s
->flags
& PA_SINK_DEFERRED_VOLUME
) {
3369 struct sink_message_set_port msg
= { .port
= port
, .ret
= 0 };
3370 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_PORT
, &msg
, 0, NULL
) == 0);
3374 ret
= s
->set_port(s
, port
);
3377 return -PA_ERR_NOENTITY
;
3379 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
3381 pa_log_info("Changed port of sink %u \"%s\" to %s", s
->index
, s
->name
, port
->name
);
3383 s
->active_port
= port
;
3384 s
->save_port
= save
;
3386 pa_sink_set_latency_offset(s
, s
->active_port
->latency_offset
);
3388 pa_hook_fire(&s
->core
->hooks
[PA_CORE_HOOK_SINK_PORT_CHANGED
], s
);
3393 pa_bool_t
pa_device_init_icon(pa_proplist
*p
, pa_bool_t is_sink
) {
3394 const char *ff
, *c
, *t
= NULL
, *s
= "", *profile
, *bus
;
3398 if (pa_proplist_contains(p
, PA_PROP_DEVICE_ICON_NAME
))
3401 if ((ff
= pa_proplist_gets(p
, PA_PROP_DEVICE_FORM_FACTOR
))) {
3403 if (pa_streq(ff
, "microphone"))
3404 t
= "audio-input-microphone";
3405 else if (pa_streq(ff
, "webcam"))
3407 else if (pa_streq(ff
, "computer"))
3409 else if (pa_streq(ff
, "handset"))
3411 else if (pa_streq(ff
, "portable"))
3412 t
= "multimedia-player";
3413 else if (pa_streq(ff
, "tv"))
3414 t
= "video-display";
3417 * The following icons are not part of the icon naming spec,
3418 * because Rodney Dawes sucks as the maintainer of that spec.
3420 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3422 else if (pa_streq(ff
, "headset"))
3423 t
= "audio-headset";
3424 else if (pa_streq(ff
, "headphone"))
3425 t
= "audio-headphones";
3426 else if (pa_streq(ff
, "speaker"))
3427 t
= "audio-speakers";
3428 else if (pa_streq(ff
, "hands-free"))
3429 t
= "audio-handsfree";
3433 if ((c
= pa_proplist_gets(p
, PA_PROP_DEVICE_CLASS
)))
3434 if (pa_streq(c
, "modem"))
3441 t
= "audio-input-microphone";
3444 if ((profile
= pa_proplist_gets(p
, PA_PROP_DEVICE_PROFILE_NAME
))) {
3445 if (strstr(profile
, "analog"))
3447 else if (strstr(profile
, "iec958"))
3449 else if (strstr(profile
, "hdmi"))
3453 bus
= pa_proplist_gets(p
, PA_PROP_DEVICE_BUS
);
3455 pa_proplist_setf(p
, PA_PROP_DEVICE_ICON_NAME
, "%s%s%s%s", t
, pa_strempty(s
), bus
? "-" : "", pa_strempty(bus
));
3460 pa_bool_t
pa_device_init_description(pa_proplist
*p
) {
3461 const char *s
, *d
= NULL
, *k
;
3464 if (pa_proplist_contains(p
, PA_PROP_DEVICE_DESCRIPTION
))
3467 if ((s
= pa_proplist_gets(p
, PA_PROP_DEVICE_FORM_FACTOR
)))
3468 if (pa_streq(s
, "internal"))
3469 d
= _("Built-in Audio");
3472 if ((s
= pa_proplist_gets(p
, PA_PROP_DEVICE_CLASS
)))
3473 if (pa_streq(s
, "modem"))
3477 d
= pa_proplist_gets(p
, PA_PROP_DEVICE_PRODUCT_NAME
);
3482 k
= pa_proplist_gets(p
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
);
3485 pa_proplist_setf(p
, PA_PROP_DEVICE_DESCRIPTION
, "%s %s", d
, k
);
3487 pa_proplist_sets(p
, PA_PROP_DEVICE_DESCRIPTION
, d
);
3492 pa_bool_t
pa_device_init_intended_roles(pa_proplist
*p
) {
3496 if (pa_proplist_contains(p
, PA_PROP_DEVICE_INTENDED_ROLES
))
3499 if ((s
= pa_proplist_gets(p
, PA_PROP_DEVICE_FORM_FACTOR
)))
3500 if (pa_streq(s
, "handset") || pa_streq(s
, "hands-free")
3501 || pa_streq(s
, "headset")) {
3502 pa_proplist_sets(p
, PA_PROP_DEVICE_INTENDED_ROLES
, "phone");
3509 unsigned pa_device_init_priority(pa_proplist
*p
) {
3511 unsigned priority
= 0;
3515 if ((s
= pa_proplist_gets(p
, PA_PROP_DEVICE_CLASS
))) {
3517 if (pa_streq(s
, "sound"))
3519 else if (!pa_streq(s
, "modem"))
3523 if ((s
= pa_proplist_gets(p
, PA_PROP_DEVICE_FORM_FACTOR
))) {
3525 if (pa_streq(s
, "internal"))
3527 else if (pa_streq(s
, "speaker"))
3529 else if (pa_streq(s
, "headphone"))
3533 if ((s
= pa_proplist_gets(p
, PA_PROP_DEVICE_BUS
))) {
3535 if (pa_streq(s
, "pci"))
3537 else if (pa_streq(s
, "usb"))
3539 else if (pa_streq(s
, "bluetooth"))
3543 if ((s
= pa_proplist_gets(p
, PA_PROP_DEVICE_PROFILE_NAME
))) {
3545 if (pa_startswith(s
, "analog-"))
3547 else if (pa_startswith(s
, "iec958-"))
3554 PA_STATIC_FLIST_DECLARE(pa_sink_volume_change
, 0, pa_xfree
);
3556 /* Called from the IO thread. */
3557 static pa_sink_volume_change
*pa_sink_volume_change_new(pa_sink
*s
) {
3558 pa_sink_volume_change
*c
;
3559 if (!(c
= pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change
))))
3560 c
= pa_xnew(pa_sink_volume_change
, 1);
3562 PA_LLIST_INIT(pa_sink_volume_change
, c
);
3564 pa_cvolume_reset(&c
->hw_volume
, s
->sample_spec
.channels
);
3568 /* Called from the IO thread. */
3569 static void pa_sink_volume_change_free(pa_sink_volume_change
*c
) {
3571 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change
), c
) < 0)
3575 /* Called from the IO thread. */
3576 void pa_sink_volume_change_push(pa_sink
*s
) {
3577 pa_sink_volume_change
*c
= NULL
;
3578 pa_sink_volume_change
*nc
= NULL
;
3579 uint32_t safety_margin
= s
->thread_info
.volume_change_safety_margin
;
3581 const char *direction
= NULL
;
3584 nc
= pa_sink_volume_change_new(s
);
3586 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3587 * Adding one more volume for HW would get us rid of this, but I am trying
3588 * to survive with the ones we already have. */
3589 pa_sw_cvolume_divide(&nc
->hw_volume
, &s
->real_volume
, &s
->soft_volume
);
3591 if (!s
->thread_info
.volume_changes
&& pa_cvolume_equal(&nc
->hw_volume
, &s
->thread_info
.current_hw_volume
)) {
3592 pa_log_debug("Volume not changing");
3593 pa_sink_volume_change_free(nc
);
3597 nc
->at
= pa_sink_get_latency_within_thread(s
);
3598 nc
->at
+= pa_rtclock_now() + s
->thread_info
.volume_change_extra_delay
;
3600 if (s
->thread_info
.volume_changes_tail
) {
3601 for (c
= s
->thread_info
.volume_changes_tail
; c
; c
= c
->prev
) {
3602 /* If volume is going up let's do it a bit late. If it is going
3603 * down let's do it a bit early. */
3604 if (pa_cvolume_avg(&nc
->hw_volume
) > pa_cvolume_avg(&c
->hw_volume
)) {
3605 if (nc
->at
+ safety_margin
> c
->at
) {
3606 nc
->at
+= safety_margin
;
3611 else if (nc
->at
- safety_margin
> c
->at
) {
3612 nc
->at
-= safety_margin
;
3620 if (pa_cvolume_avg(&nc
->hw_volume
) > pa_cvolume_avg(&s
->thread_info
.current_hw_volume
)) {
3621 nc
->at
+= safety_margin
;
3624 nc
->at
-= safety_margin
;
3627 PA_LLIST_PREPEND(pa_sink_volume_change
, s
->thread_info
.volume_changes
, nc
);
3630 PA_LLIST_INSERT_AFTER(pa_sink_volume_change
, s
->thread_info
.volume_changes
, c
, nc
);
3633 pa_log_debug("Volume going %s to %d at %llu", direction
, pa_cvolume_avg(&nc
->hw_volume
), (long long unsigned) nc
->at
);
3635 /* We can ignore volume events that came earlier but should happen later than this. */
3636 PA_LLIST_FOREACH(c
, nc
->next
) {
3637 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c
->hw_volume
), (long long unsigned) c
->at
);
3638 pa_sink_volume_change_free(c
);
3641 s
->thread_info
.volume_changes_tail
= nc
;
3644 /* Called from the IO thread. */
3645 static void pa_sink_volume_change_flush(pa_sink
*s
) {
3646 pa_sink_volume_change
*c
= s
->thread_info
.volume_changes
;
3648 s
->thread_info
.volume_changes
= NULL
;
3649 s
->thread_info
.volume_changes_tail
= NULL
;
3651 pa_sink_volume_change
*next
= c
->next
;
3652 pa_sink_volume_change_free(c
);
3657 /* Called from the IO thread. */
3658 pa_bool_t
pa_sink_volume_change_apply(pa_sink
*s
, pa_usec_t
*usec_to_next
) {
3660 pa_bool_t ret
= FALSE
;
3664 if (!s
->thread_info
.volume_changes
|| !PA_SINK_IS_LINKED(s
->state
)) {
3670 pa_assert(s
->write_volume
);
3672 now
= pa_rtclock_now();
3674 while (s
->thread_info
.volume_changes
&& now
>= s
->thread_info
.volume_changes
->at
) {
3675 pa_sink_volume_change
*c
= s
->thread_info
.volume_changes
;
3676 PA_LLIST_REMOVE(pa_sink_volume_change
, s
->thread_info
.volume_changes
, c
);
3677 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3678 pa_cvolume_avg(&c
->hw_volume
), (long long unsigned) c
->at
, (long long unsigned) (now
- c
->at
));
3680 s
->thread_info
.current_hw_volume
= c
->hw_volume
;
3681 pa_sink_volume_change_free(c
);
3687 if (s
->thread_info
.volume_changes
) {
3689 *usec_to_next
= s
->thread_info
.volume_changes
->at
- now
;
3690 if (pa_log_ratelimit(PA_LOG_DEBUG
))
3691 pa_log_debug("Next volume change in %lld usec", (long long) (s
->thread_info
.volume_changes
->at
- now
));
3696 s
->thread_info
.volume_changes_tail
= NULL
;
3701 /* Called from the IO thread. */
3702 static void pa_sink_volume_change_rewind(pa_sink
*s
, size_t nbytes
) {
3703 /* All the queued volume events later than current latency are shifted to happen earlier. */
3704 pa_sink_volume_change
*c
;
3705 pa_volume_t prev_vol
= pa_cvolume_avg(&s
->thread_info
.current_hw_volume
);
3706 pa_usec_t rewound
= pa_bytes_to_usec(nbytes
, &s
->sample_spec
);
3707 pa_usec_t limit
= pa_sink_get_latency_within_thread(s
);
3709 pa_log_debug("latency = %lld", (long long) limit
);
3710 limit
+= pa_rtclock_now() + s
->thread_info
.volume_change_extra_delay
;
3712 PA_LLIST_FOREACH(c
, s
->thread_info
.volume_changes
) {
3713 pa_usec_t modified_limit
= limit
;
3714 if (prev_vol
> pa_cvolume_avg(&c
->hw_volume
))
3715 modified_limit
-= s
->thread_info
.volume_change_safety_margin
;
3717 modified_limit
+= s
->thread_info
.volume_change_safety_margin
;
3718 if (c
->at
> modified_limit
) {
3720 if (c
->at
< modified_limit
)
3721 c
->at
= modified_limit
;
3723 prev_vol
= pa_cvolume_avg(&c
->hw_volume
);
3725 pa_sink_volume_change_apply(s
, NULL
);
3728 /* Called from the main thread */
3729 /* Gets the list of formats supported by the sink. The members and idxset must
3730 * be freed by the caller. */
3731 pa_idxset
* pa_sink_get_formats(pa_sink
*s
) {
3736 if (s
->get_formats
) {
3737 /* Sink supports format query, all is good */
3738 ret
= s
->get_formats(s
);
3740 /* Sink doesn't support format query, so assume it does PCM */
3741 pa_format_info
*f
= pa_format_info_new();
3742 f
->encoding
= PA_ENCODING_PCM
;
3744 ret
= pa_idxset_new(NULL
, NULL
);
3745 pa_idxset_put(ret
, f
, NULL
);
3751 /* Called from the main thread */
3752 /* Allows an external source to set what formats a sink supports if the sink
3753 * permits this. The function makes a copy of the formats on success. */
3754 pa_bool_t
pa_sink_set_formats(pa_sink
*s
, pa_idxset
*formats
) {
3759 /* Sink supports setting formats -- let's give it a shot */
3760 return s
->set_formats(s
, formats
);
3762 /* Sink doesn't support setting this -- bail out */
3766 /* Called from the main thread */
3767 /* Checks if the sink can accept this format */
3768 pa_bool_t
pa_sink_check_format(pa_sink
*s
, pa_format_info
*f
)
3770 pa_idxset
*formats
= NULL
;
3771 pa_bool_t ret
= FALSE
;
3776 formats
= pa_sink_get_formats(s
);
3779 pa_format_info
*finfo_device
;
3782 PA_IDXSET_FOREACH(finfo_device
, formats
, i
) {
3783 if (pa_format_info_is_compatible(finfo_device
, f
)) {
3789 pa_idxset_free(formats
, (pa_free_cb_t
) pa_format_info_free
);
3795 /* Called from the main thread */
3796 /* Calculates the intersection between formats supported by the sink and
3797 * in_formats, and returns these, in the order of the sink's formats. */
3798 pa_idxset
* pa_sink_check_formats(pa_sink
*s
, pa_idxset
*in_formats
) {
3799 pa_idxset
*out_formats
= pa_idxset_new(NULL
, NULL
), *sink_formats
= NULL
;
3800 pa_format_info
*f_sink
, *f_in
;
3805 if (!in_formats
|| pa_idxset_isempty(in_formats
))
3808 sink_formats
= pa_sink_get_formats(s
);
3810 PA_IDXSET_FOREACH(f_sink
, sink_formats
, i
) {
3811 PA_IDXSET_FOREACH(f_in
, in_formats
, j
) {
3812 if (pa_format_info_is_compatible(f_sink
, f_in
))
3813 pa_idxset_put(out_formats
, pa_format_info_copy(f_in
), NULL
);
3819 pa_idxset_free(sink_formats
, (pa_free_cb_t
) pa_format_info_free
);