2 This file is part of PulseAudio.
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
31 #include <pulse/introspect.h>
32 #include <pulse/format.h>
33 #include <pulse/utf8.h>
34 #include <pulse/xmalloc.h>
35 #include <pulse/timeval.h>
36 #include <pulse/util.h>
37 #include <pulse/rtclock.h>
38 #include <pulse/internal.h>
40 #include <pulsecore/i18n.h>
41 #include <pulsecore/sink-input.h>
42 #include <pulsecore/namereg.h>
43 #include <pulsecore/core-util.h>
44 #include <pulsecore/sample-util.h>
45 #include <pulsecore/core-subscribe.h>
46 #include <pulsecore/log.h>
47 #include <pulsecore/macro.h>
48 #include <pulsecore/play-memblockq.h>
49 #include <pulsecore/flist.h>
53 #define MAX_MIX_CHANNELS 32
54 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
55 #define ABSOLUTE_MIN_LATENCY (500)
56 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
57 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
59 PA_DEFINE_PUBLIC_CLASS(pa_sink
, pa_msgobject
);
61 struct pa_sink_volume_change
{
65 PA_LLIST_FIELDS(pa_sink_volume_change
);
68 struct sink_message_set_port
{
73 static void sink_free(pa_object
*s
);
75 static void pa_sink_volume_change_push(pa_sink
*s
);
76 static void pa_sink_volume_change_flush(pa_sink
*s
);
77 static void pa_sink_volume_change_rewind(pa_sink
*s
, size_t nbytes
);
/* Initialize a pa_sink_new_data struct; the visible code allocates a fresh
 * property list for it.
 * NOTE(review): this extraction dropped interior lines (embedded numbering
 * jumps 79 -> 83); the asserts/zeroing and the `return data;` of the full
 * function are not visible here -- confirm against the complete file. */
79 pa_sink_new_data
* pa_sink_new_data_init(pa_sink_new_data
*data
) {
83 data
->proplist
= pa_proplist_new();
/* Store a private copy of 'name' in the new-data struct. pa_xstrdup()
 * duplicates the string, so the caller keeps ownership of its argument.
 * NOTE(review): original lines 89-91 (likely asserts and a pa_xfree of any
 * previous name) are missing from this extraction. */
88 void pa_sink_new_data_set_name(pa_sink_new_data
*data
, const char *name
) {
92 data
->name
= pa_xstrdup(name
)
/* Record a sample spec in the new-data struct. sample_spec_is_set tracks
 * whether a non-NULL spec was supplied; the spec itself is copied by value
 * only when one was given (spec == NULL leaves the stored spec untouched). */
95 void pa_sink_new_data_set_sample_spec(pa_sink_new_data
*data
, const pa_sample_spec
*spec
) {
98 if ((data
->sample_spec_is_set
= !!spec
))
99 data
->sample_spec
= *spec
;
/* Record a channel map in the new-data struct. Mirrors the sample-spec
 * setter: channel_map_is_set reflects whether 'map' was non-NULL, and the
 * map is copied by value only in that case. */
102 void pa_sink_new_data_set_channel_map(pa_sink_new_data
*data
, const pa_channel_map
*map
) {
105 if ((data
->channel_map_is_set
= !!map
))
106 data
->channel_map
= *map
;
/* Record an initial volume in the new-data struct. volume_is_set reflects
 * whether 'volume' was non-NULL; the cvolume is copied by value only when
 * one was supplied. */
109 void pa_sink_new_data_set_volume(pa_sink_new_data
*data
, const pa_cvolume
*volume
) {
112 if ((data
->volume_is_set
= !!volume
))
113 data
->volume
= *volume
;
/* Record an initial mute state. Unlike the pointer-based setters above,
 * mute is passed by value, so muted_is_set is unconditionally TRUE; !!mute
 * normalizes any non-zero value to 1. */
116 void pa_sink_new_data_set_muted(pa_sink_new_data
*data
, pa_bool_t mute
) {
119 data
->muted_is_set
= TRUE
;
120 data
->muted
= !!mute
;
/* Record the desired active port by name. Frees any previously stored port
 * string first, then duplicates 'port'; pa_xstrdup(NULL) presumably yields
 * NULL, so passing NULL clears the selection -- confirm against xmalloc.h. */
123 void pa_sink_new_data_set_port(pa_sink_new_data
*data
, const char *port
) {
126 pa_xfree(data
->active_port
);
127 data
->active_port
= pa_xstrdup(port
);
/* Release everything owned by a pa_sink_new_data struct: the property list,
 * every pa_device_port still in the ports hashmap (stolen one by one and
 * freed), the hashmap itself, and the name / active_port strings.
 * NOTE(review): interior lines are missing from this extraction (numbering
 * jumps 133 -> 138); guards such as `if (data->ports)` may exist in the full
 * file -- confirm before relying on this fragment. */
130 void pa_sink_new_data_done(pa_sink_new_data
*data
) {
133 pa_proplist_free(data
->proplist
);
138 while ((p
= pa_hashmap_steal_first(data
->ports
)))
139 pa_device_port_free(p
);
141 pa_hashmap_free(data
->ports
, NULL
, NULL
);
144 pa_xfree(data
->name
);
145 pa_xfree(data
->active_port
);
/* Allocate a pa_device_port plus 'extra' trailing bytes for the caller's
 * private data (the struct size is PA_ALIGN'ed so the trailing area is
 * suitably aligned). Name and description are duplicated, so the port owns
 * its own copies.
 * NOTE(review): the declaration of 'p', the priority initialization, and the
 * return statement were dropped from this extraction (numbering jumps
 * 148 -> 153 and ends at 155). */
148 pa_device_port
*pa_device_port_new(const char *name
, const char *description
, size_t extra
) {
153 p
= pa_xmalloc(PA_ALIGN(sizeof(pa_device_port
)) + extra
);
154 p
->name
= pa_xstrdup(name
);
155 p
->description
= pa_xstrdup(description
);
/* Free a pa_device_port. Only the description free is visible here;
 * NOTE(review): the frees of p->name and of p itself (original lines
 * 163-169) are missing from this extraction -- this fragment alone would
 * leak, so confirm against the complete file. */
162 void pa_device_port_free(pa_device_port
*p
) {
166 pa_xfree(p
->description
);
/* Reset the sink's implementor-supplied callback pointers to NULL so a
 * half-initialized or unlinked sink never invokes stale backend code.
 * NOTE(review): this extraction dropped several lines (172-174, 177-178,
 * 181, 183-184); the full function presumably also clears set_state,
 * set_mute/get_mute, set_port etc. -- confirm against the complete file. */
170 /* Called from main context */
171 static void reset_callbacks(pa_sink
*s
) {
175 s
->get_volume
= NULL
;
176 s
->set_volume
= NULL
;
179 s
->request_rewind
= NULL
;
180 s
->update_requested_latency
= NULL
;
182 s
->get_formats
= NULL
;
185 /* Called from main context */
186 pa_sink
* pa_sink_new(
188 pa_sink_new_data
*data
,
189 pa_sink_flags_t flags
) {
193 char st
[PA_SAMPLE_SPEC_SNPRINT_MAX
], cm
[PA_CHANNEL_MAP_SNPRINT_MAX
];
194 pa_source_new_data source_data
;
200 pa_assert(data
->name
);
201 pa_assert_ctl_context();
203 s
= pa_msgobject_new(pa_sink
);
205 if (!(name
= pa_namereg_register(core
, data
->name
, PA_NAMEREG_SINK
, s
, data
->namereg_fail
))) {
206 pa_log_debug("Failed to register name %s.", data
->name
);
211 pa_sink_new_data_set_name(data
, name
);
213 if (pa_hook_fire(&core
->hooks
[PA_CORE_HOOK_SINK_NEW
], data
) < 0) {
215 pa_namereg_unregister(core
, name
);
219 /* FIXME, need to free s here on failure */
221 pa_return_null_if_fail(!data
->driver
|| pa_utf8_valid(data
->driver
));
222 pa_return_null_if_fail(data
->name
&& pa_utf8_valid(data
->name
) && data
->name
[0]);
224 pa_return_null_if_fail(data
->sample_spec_is_set
&& pa_sample_spec_valid(&data
->sample_spec
));
226 if (!data
->channel_map_is_set
)
227 pa_return_null_if_fail(pa_channel_map_init_auto(&data
->channel_map
, data
->sample_spec
.channels
, PA_CHANNEL_MAP_DEFAULT
));
229 pa_return_null_if_fail(pa_channel_map_valid(&data
->channel_map
));
230 pa_return_null_if_fail(data
->channel_map
.channels
== data
->sample_spec
.channels
);
232 /* FIXME: There should probably be a general function for checking whether
233 * the sink volume is allowed to be set, like there is for sink inputs. */
234 pa_assert(!data
->volume_is_set
|| !(flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
));
236 if (!data
->volume_is_set
) {
237 pa_cvolume_reset(&data
->volume
, data
->sample_spec
.channels
);
238 data
->save_volume
= FALSE
;
241 pa_return_null_if_fail(pa_cvolume_valid(&data
->volume
));
242 pa_return_null_if_fail(pa_cvolume_compatible(&data
->volume
, &data
->sample_spec
));
244 if (!data
->muted_is_set
)
248 pa_proplist_update(data
->proplist
, PA_UPDATE_MERGE
, data
->card
->proplist
);
250 pa_device_init_description(data
->proplist
);
251 pa_device_init_icon(data
->proplist
, TRUE
);
252 pa_device_init_intended_roles(data
->proplist
);
254 if (pa_hook_fire(&core
->hooks
[PA_CORE_HOOK_SINK_FIXATE
], data
) < 0) {
256 pa_namereg_unregister(core
, name
);
260 s
->parent
.parent
.free
= sink_free
;
261 s
->parent
.process_msg
= pa_sink_process_msg
;
264 s
->state
= PA_SINK_INIT
;
267 s
->suspend_cause
= 0;
268 s
->name
= pa_xstrdup(name
);
269 s
->proplist
= pa_proplist_copy(data
->proplist
);
270 s
->driver
= pa_xstrdup(pa_path_get_filename(data
->driver
));
271 s
->module
= data
->module
;
272 s
->card
= data
->card
;
274 s
->priority
= pa_device_init_priority(s
->proplist
);
276 s
->sample_spec
= data
->sample_spec
;
277 s
->channel_map
= data
->channel_map
;
279 s
->inputs
= pa_idxset_new(NULL
, NULL
);
281 s
->input_to_master
= NULL
;
283 s
->reference_volume
= s
->real_volume
= data
->volume
;
284 pa_cvolume_reset(&s
->soft_volume
, s
->sample_spec
.channels
);
285 s
->base_volume
= PA_VOLUME_NORM
;
286 s
->n_volume_steps
= PA_VOLUME_NORM
+1;
287 s
->muted
= data
->muted
;
288 s
->refresh_volume
= s
->refresh_muted
= FALSE
;
295 /* As a minor optimization we just steal the list instead of
297 s
->ports
= data
->ports
;
300 s
->active_port
= NULL
;
301 s
->save_port
= FALSE
;
303 if (data
->active_port
&& s
->ports
)
304 if ((s
->active_port
= pa_hashmap_get(s
->ports
, data
->active_port
)))
305 s
->save_port
= data
->save_port
;
307 if (!s
->active_port
&& s
->ports
) {
311 PA_HASHMAP_FOREACH(p
, s
->ports
, state
)
312 if (!s
->active_port
|| p
->priority
> s
->active_port
->priority
)
316 s
->save_volume
= data
->save_volume
;
317 s
->save_muted
= data
->save_muted
;
319 pa_silence_memchunk_get(
320 &core
->silence_cache
,
326 s
->thread_info
.rtpoll
= NULL
;
327 s
->thread_info
.inputs
= pa_hashmap_new(pa_idxset_trivial_hash_func
, pa_idxset_trivial_compare_func
);
328 s
->thread_info
.soft_volume
= s
->soft_volume
;
329 s
->thread_info
.soft_muted
= s
->muted
;
330 s
->thread_info
.state
= s
->state
;
331 s
->thread_info
.rewind_nbytes
= 0;
332 s
->thread_info
.rewind_requested
= FALSE
;
333 s
->thread_info
.max_rewind
= 0;
334 s
->thread_info
.max_request
= 0;
335 s
->thread_info
.requested_latency_valid
= FALSE
;
336 s
->thread_info
.requested_latency
= 0;
337 s
->thread_info
.min_latency
= ABSOLUTE_MIN_LATENCY
;
338 s
->thread_info
.max_latency
= ABSOLUTE_MAX_LATENCY
;
339 s
->thread_info
.fixed_latency
= flags
& PA_SINK_DYNAMIC_LATENCY
? 0 : DEFAULT_FIXED_LATENCY
;
341 PA_LLIST_HEAD_INIT(pa_sink_volume_change
, s
->thread_info
.volume_changes
);
342 s
->thread_info
.volume_changes_tail
= NULL
;
343 pa_sw_cvolume_multiply(&s
->thread_info
.current_hw_volume
, &s
->soft_volume
, &s
->real_volume
);
344 s
->thread_info
.volume_change_safety_margin
= core
->sync_volume_safety_margin_usec
;
345 s
->thread_info
.volume_change_extra_delay
= core
->sync_volume_extra_delay_usec
;
347 /* FIXME: This should probably be moved to pa_sink_put() */
348 pa_assert_se(pa_idxset_put(core
->sinks
, s
, &s
->index
) >= 0);
351 pa_assert_se(pa_idxset_put(s
->card
->sinks
, s
, NULL
) >= 0);
353 pt
= pa_proplist_to_string_sep(s
->proplist
, "\n ");
354 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
357 pa_sample_spec_snprint(st
, sizeof(st
), &s
->sample_spec
),
358 pa_channel_map_snprint(cm
, sizeof(cm
), &s
->channel_map
),
362 pa_source_new_data_init(&source_data
);
363 pa_source_new_data_set_sample_spec(&source_data
, &s
->sample_spec
);
364 pa_source_new_data_set_channel_map(&source_data
, &s
->channel_map
);
365 source_data
.name
= pa_sprintf_malloc("%s.monitor", name
);
366 source_data
.driver
= data
->driver
;
367 source_data
.module
= data
->module
;
368 source_data
.card
= data
->card
;
370 dn
= pa_proplist_gets(s
->proplist
, PA_PROP_DEVICE_DESCRIPTION
);
371 pa_proplist_setf(source_data
.proplist
, PA_PROP_DEVICE_DESCRIPTION
, "Monitor of %s", dn
? dn
: s
->name
);
372 pa_proplist_sets(source_data
.proplist
, PA_PROP_DEVICE_CLASS
, "monitor");
374 s
->monitor_source
= pa_source_new(core
, &source_data
,
375 ((flags
& PA_SINK_LATENCY
) ? PA_SOURCE_LATENCY
: 0) |
376 ((flags
& PA_SINK_DYNAMIC_LATENCY
) ? PA_SOURCE_DYNAMIC_LATENCY
: 0));
378 pa_source_new_data_done(&source_data
);
380 if (!s
->monitor_source
) {
386 s
->monitor_source
->monitor_of
= s
;
388 pa_source_set_latency_range(s
->monitor_source
, s
->thread_info
.min_latency
, s
->thread_info
.max_latency
);
389 pa_source_set_fixed_latency(s
->monitor_source
, s
->thread_info
.fixed_latency
);
390 pa_source_set_max_rewind(s
->monitor_source
, s
->thread_info
.max_rewind
);
395 /* Called from main context */
396 static int sink_set_state(pa_sink
*s
, pa_sink_state_t state
) {
398 pa_bool_t suspend_change
;
399 pa_sink_state_t original_state
;
402 pa_assert_ctl_context();
404 if (s
->state
== state
)
407 original_state
= s
->state
;
410 (original_state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(state
)) ||
411 (PA_SINK_IS_OPENED(original_state
) && state
== PA_SINK_SUSPENDED
);
414 if ((ret
= s
->set_state(s
, state
)) < 0)
418 if ((ret
= pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_STATE
, PA_UINT_TO_PTR(state
), 0, NULL
)) < 0) {
421 s
->set_state(s
, original_state
);
428 if (state
!= PA_SINK_UNLINKED
) { /* if we enter UNLINKED state pa_sink_unlink() will fire the apropriate events */
429 pa_hook_fire(&s
->core
->hooks
[PA_CORE_HOOK_SINK_STATE_CHANGED
], s
);
430 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
| PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
433 if (suspend_change
) {
437 /* We're suspending or resuming, tell everyone about it */
439 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
)
440 if (s
->state
== PA_SINK_SUSPENDED
&&
441 (i
->flags
& PA_SINK_INPUT_KILL_ON_SUSPEND
))
442 pa_sink_input_kill(i
);
444 i
->suspend(i
, state
== PA_SINK_SUSPENDED
);
446 if (s
->monitor_source
)
447 pa_source_sync_suspend(s
->monitor_source
);
453 void pa_sink_set_get_volume_callback(pa_sink
*s
, pa_sink_cb_t cb
) {
459 void pa_sink_set_set_volume_callback(pa_sink
*s
, pa_sink_cb_t cb
) {
460 pa_sink_flags_t flags
;
463 pa_assert(!s
->write_volume
|| cb
);
467 /* Save the current flags so we can tell if they've changed */
471 /* The sink implementor is responsible for setting decibel volume support */
472 s
->flags
|= PA_SINK_HW_VOLUME_CTRL
;
474 s
->flags
&= ~PA_SINK_HW_VOLUME_CTRL
;
475 /* See note below in pa_sink_put() about volume sharing and decibel volumes */
476 pa_sink_enable_decibel_volume(s
, !(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
));
479 /* If the flags have changed after init, let any clients know via a change event */
480 if (s
->state
!= PA_SINK_INIT
&& flags
!= s
->flags
)
481 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
484 void pa_sink_set_write_volume_callback(pa_sink
*s
, pa_sink_cb_t cb
) {
485 pa_sink_flags_t flags
;
488 pa_assert(!cb
|| s
->set_volume
);
490 s
->write_volume
= cb
;
492 /* Save the current flags so we can tell if they've changed */
496 s
->flags
|= PA_SINK_SYNC_VOLUME
;
498 s
->flags
&= ~PA_SINK_SYNC_VOLUME
;
500 /* If the flags have changed after init, let any clients know via a change event */
501 if (s
->state
!= PA_SINK_INIT
&& flags
!= s
->flags
)
502 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
505 void pa_sink_set_get_mute_callback(pa_sink
*s
, pa_sink_cb_t cb
) {
511 void pa_sink_set_set_mute_callback(pa_sink
*s
, pa_sink_cb_t cb
) {
512 pa_sink_flags_t flags
;
518 /* Save the current flags so we can tell if they've changed */
522 s
->flags
|= PA_SINK_HW_MUTE_CTRL
;
524 s
->flags
&= ~PA_SINK_HW_MUTE_CTRL
;
526 /* If the flags have changed after init, let any clients know via a change event */
527 if (s
->state
!= PA_SINK_INIT
&& flags
!= s
->flags
)
528 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
531 static void enable_flat_volume(pa_sink
*s
, pa_bool_t enable
) {
532 pa_sink_flags_t flags
;
536 /* Always follow the overall user preference here */
537 enable
= enable
&& s
->core
->flat_volumes
;
539 /* Save the current flags so we can tell if they've changed */
543 s
->flags
|= PA_SINK_FLAT_VOLUME
;
545 s
->flags
&= ~PA_SINK_FLAT_VOLUME
;
547 /* If the flags have changed after init, let any clients know via a change event */
548 if (s
->state
!= PA_SINK_INIT
&& flags
!= s
->flags
)
549 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
552 void pa_sink_enable_decibel_volume(pa_sink
*s
, pa_bool_t enable
) {
553 pa_sink_flags_t flags
;
557 /* Save the current flags so we can tell if they've changed */
561 s
->flags
|= PA_SINK_DECIBEL_VOLUME
;
562 enable_flat_volume(s
, TRUE
);
564 s
->flags
&= ~PA_SINK_DECIBEL_VOLUME
;
565 enable_flat_volume(s
, FALSE
);
568 /* If the flags have changed after init, let any clients know via a change event */
569 if (s
->state
!= PA_SINK_INIT
&& flags
!= s
->flags
)
570 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
573 /* Called from main context */
574 void pa_sink_put(pa_sink
* s
) {
575 pa_sink_assert_ref(s
);
576 pa_assert_ctl_context();
578 pa_assert(s
->state
== PA_SINK_INIT
);
579 pa_assert(!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
) || s
->input_to_master
);
581 /* The following fields must be initialized properly when calling _put() */
582 pa_assert(s
->asyncmsgq
);
583 pa_assert(s
->thread_info
.min_latency
<= s
->thread_info
.max_latency
);
585 /* Generally, flags should be initialized via pa_sink_new(). As a
586 * special exception we allow some volume related flags to be set
587 * between _new() and _put() by the callback setter functions above.
589 * Thus we implement a couple safeguards here which ensure the above
590 * setters were used (or at least the implementor made manual changes
591 * in a compatible way).
593 * Note: All of these flags set here can change over the life time
595 pa_assert(!(s
->flags
& PA_SINK_HW_VOLUME_CTRL
) || s
->set_volume
);
596 pa_assert(!(s
->flags
& PA_SINK_SYNC_VOLUME
) || s
->write_volume
);
597 pa_assert(!(s
->flags
& PA_SINK_HW_MUTE_CTRL
) || s
->set_mute
);
599 /* XXX: Currently decibel volume is disabled for all sinks that use volume
600 * sharing. When the master sink supports decibel volume, it would be good
601 * to have the flag also in the filter sink, but currently we don't do that
602 * so that the flags of the filter sink never change when it's moved from
603 * a master sink to another. One solution for this problem would be to
604 * remove user-visible volume altogether from filter sinks when volume
605 * sharing is used, but the current approach was easier to implement... */
606 /* We always support decibel volumes in software, otherwise we leave it to
607 * the sink implementor to set this flag as needed.
609 * Note: This flag can also change over the life time of the sink. */
610 if (!(s
->flags
& PA_SINK_HW_VOLUME_CTRL
) && !(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
))
611 pa_sink_enable_decibel_volume(s
, TRUE
);
613 /* If the sink implementor support DB volumes by itself, we should always
614 * try and enable flat volumes too */
615 if ((s
->flags
& PA_SINK_DECIBEL_VOLUME
))
616 enable_flat_volume(s
, TRUE
);
618 if (s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
) {
619 pa_sink
*root_sink
= s
->input_to_master
->sink
;
621 while (root_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)
622 root_sink
= root_sink
->input_to_master
->sink
;
624 s
->reference_volume
= root_sink
->reference_volume
;
625 pa_cvolume_remap(&s
->reference_volume
, &root_sink
->channel_map
, &s
->channel_map
);
627 s
->real_volume
= root_sink
->real_volume
;
628 pa_cvolume_remap(&s
->real_volume
, &root_sink
->channel_map
, &s
->channel_map
);
630 /* We assume that if the sink implementor changed the default
631 * volume he did so in real_volume, because that is the usual
632 * place where he is supposed to place his changes. */
633 s
->reference_volume
= s
->real_volume
;
635 s
->thread_info
.soft_volume
= s
->soft_volume
;
636 s
->thread_info
.soft_muted
= s
->muted
;
637 pa_sw_cvolume_multiply(&s
->thread_info
.current_hw_volume
, &s
->soft_volume
, &s
->real_volume
);
639 pa_assert((s
->flags
& PA_SINK_HW_VOLUME_CTRL
)
640 || (s
->base_volume
== PA_VOLUME_NORM
641 && ((s
->flags
& PA_SINK_DECIBEL_VOLUME
|| (s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)))));
642 pa_assert(!(s
->flags
& PA_SINK_DECIBEL_VOLUME
) || s
->n_volume_steps
== PA_VOLUME_NORM
+1);
643 pa_assert(!(s
->flags
& PA_SINK_DYNAMIC_LATENCY
) == (s
->thread_info
.fixed_latency
!= 0));
644 pa_assert(!(s
->flags
& PA_SINK_LATENCY
) == !(s
->monitor_source
->flags
& PA_SOURCE_LATENCY
));
645 pa_assert(!(s
->flags
& PA_SINK_DYNAMIC_LATENCY
) == !(s
->monitor_source
->flags
& PA_SOURCE_DYNAMIC_LATENCY
));
647 pa_assert(s
->monitor_source
->thread_info
.fixed_latency
== s
->thread_info
.fixed_latency
);
648 pa_assert(s
->monitor_source
->thread_info
.min_latency
== s
->thread_info
.min_latency
);
649 pa_assert(s
->monitor_source
->thread_info
.max_latency
== s
->thread_info
.max_latency
);
651 pa_assert_se(sink_set_state(s
, PA_SINK_IDLE
) == 0);
653 pa_source_put(s
->monitor_source
);
655 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
| PA_SUBSCRIPTION_EVENT_NEW
, s
->index
);
656 pa_hook_fire(&s
->core
->hooks
[PA_CORE_HOOK_SINK_PUT
], s
);
659 /* Called from main context */
660 void pa_sink_unlink(pa_sink
* s
) {
662 pa_sink_input
*i
, *j
= NULL
;
665 pa_assert_ctl_context();
667 /* Please note that pa_sink_unlink() does more than simply
668 * reversing pa_sink_put(). It also undoes the registrations
669 * already done in pa_sink_new()! */
671 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
672 * may be called multiple times on the same sink without bad
675 linked
= PA_SINK_IS_LINKED(s
->state
);
678 pa_hook_fire(&s
->core
->hooks
[PA_CORE_HOOK_SINK_UNLINK
], s
);
680 if (s
->state
!= PA_SINK_UNLINKED
)
681 pa_namereg_unregister(s
->core
, s
->name
);
682 pa_idxset_remove_by_data(s
->core
->sinks
, s
, NULL
);
685 pa_idxset_remove_by_data(s
->card
->sinks
, s
, NULL
);
687 while ((i
= pa_idxset_first(s
->inputs
, NULL
))) {
689 pa_sink_input_kill(i
);
694 sink_set_state(s
, PA_SINK_UNLINKED
);
696 s
->state
= PA_SINK_UNLINKED
;
700 if (s
->monitor_source
)
701 pa_source_unlink(s
->monitor_source
);
704 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
| PA_SUBSCRIPTION_EVENT_REMOVE
, s
->index
);
705 pa_hook_fire(&s
->core
->hooks
[PA_CORE_HOOK_SINK_UNLINK_POST
], s
);
709 /* Called from main context */
710 static void sink_free(pa_object
*o
) {
711 pa_sink
*s
= PA_SINK(o
);
715 pa_assert_ctl_context();
716 pa_assert(pa_sink_refcnt(s
) == 0);
718 if (PA_SINK_IS_LINKED(s
->state
))
721 pa_log_info("Freeing sink %u \"%s\"", s
->index
, s
->name
);
723 if (s
->monitor_source
) {
724 pa_source_unref(s
->monitor_source
);
725 s
->monitor_source
= NULL
;
728 pa_idxset_free(s
->inputs
, NULL
, NULL
);
730 while ((i
= pa_hashmap_steal_first(s
->thread_info
.inputs
)))
731 pa_sink_input_unref(i
);
733 pa_hashmap_free(s
->thread_info
.inputs
, NULL
, NULL
);
735 if (s
->silence
.memblock
)
736 pa_memblock_unref(s
->silence
.memblock
);
742 pa_proplist_free(s
->proplist
);
747 while ((p
= pa_hashmap_steal_first(s
->ports
)))
748 pa_device_port_free(p
);
750 pa_hashmap_free(s
->ports
, NULL
, NULL
);
756 /* Called from main context, and not while the IO thread is active, please */
/* Attach the asynchronous message queue used to talk to the sink's IO
 * thread, and propagate it to the monitor source if one exists. Must be
 * called from the main context while the IO thread is not running (see the
 * comment above in the original file).
 * NOTE(review): the line that actually stores q into s (original 760-762,
 * presumably `s->asyncmsgq = q;`) is missing from this extraction. */
757 void pa_sink_set_asyncmsgq(pa_sink
*s
, pa_asyncmsgq
*q
) {
758 pa_sink_assert_ref(s
);
759 pa_assert_ctl_context();
763 if (s
->monitor_source
)
764 pa_source_set_asyncmsgq(s
->monitor_source
, q
);
767 /* Called from main context, and not while the IO thread is active, please */
768 void pa_sink_update_flags(pa_sink
*s
, pa_sink_flags_t mask
, pa_sink_flags_t value
) {
769 pa_sink_assert_ref(s
);
770 pa_assert_ctl_context();
775 /* For now, allow only a minimal set of flags to be changed. */
776 pa_assert((mask
& ~(PA_SINK_DYNAMIC_LATENCY
|PA_SINK_LATENCY
)) == 0);
778 s
->flags
= (s
->flags
& ~mask
) | (value
& mask
);
780 pa_source_update_flags(s
->monitor_source
,
781 ((mask
& PA_SINK_LATENCY
) ? PA_SOURCE_LATENCY
: 0) |
782 ((mask
& PA_SINK_DYNAMIC_LATENCY
) ? PA_SOURCE_DYNAMIC_LATENCY
: 0),
783 ((value
& PA_SINK_LATENCY
) ? PA_SOURCE_LATENCY
: 0) |
784 ((value
& PA_SINK_DYNAMIC_LATENCY
) ? PA_SINK_DYNAMIC_LATENCY
: 0));
787 /* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object the IO thread will drive this sink with (stored
 * in thread_info, hence the IO-context assertion), and mirror it onto the
 * monitor source so both poll from the same loop. Callable before _put()
 * from the main context per the comment above in the original file. */
788 void pa_sink_set_rtpoll(pa_sink
*s
, pa_rtpoll
*p
) {
789 pa_sink_assert_ref(s
);
790 pa_sink_assert_io_context(s
);
792 s
->thread_info
.rtpoll
= p
;
794 if (s
->monitor_source
)
795 pa_source_set_rtpoll(s
->monitor_source
, p
);
798 /* Called from main context */
/* Recompute the sink's IDLE/RUNNING state from its current user count
 * (pa_sink_used_by). A suspended sink is left alone -- suspension is
 * controlled by pa_sink_suspend(), not by usage.
 * NOTE(review): the early-return body of the SUSPENDED branch (original
 * line 805, presumably `return 0;`) is missing from this extraction. */
799 int pa_sink_update_status(pa_sink
*s
) {
800 pa_sink_assert_ref(s
);
801 pa_assert_ctl_context();
802 pa_assert(PA_SINK_IS_LINKED(s
->state
));
804 if (s
->state
== PA_SINK_SUSPENDED
)
807 return sink_set_state(s
, pa_sink_used_by(s
) ? PA_SINK_RUNNING
: PA_SINK_IDLE
);
810 /* Called from main context */
811 int pa_sink_suspend(pa_sink
*s
, pa_bool_t suspend
, pa_suspend_cause_t cause
) {
812 pa_sink_assert_ref(s
);
813 pa_assert_ctl_context();
814 pa_assert(PA_SINK_IS_LINKED(s
->state
));
815 pa_assert(cause
!= 0);
818 s
->suspend_cause
|= cause
;
819 s
->monitor_source
->suspend_cause
|= cause
;
821 s
->suspend_cause
&= ~cause
;
822 s
->monitor_source
->suspend_cause
&= ~cause
;
825 if ((pa_sink_get_state(s
) == PA_SINK_SUSPENDED
) == !!s
->suspend_cause
)
828 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s
->name
, s
->suspend_cause
, s
->suspend_cause
? "suspending" : "resuming");
830 if (s
->suspend_cause
)
831 return sink_set_state(s
, PA_SINK_SUSPENDED
);
833 return sink_set_state(s
, pa_sink_used_by(s
) ? PA_SINK_RUNNING
: PA_SINK_IDLE
);
836 /* Called from main context */
837 pa_queue
*pa_sink_move_all_start(pa_sink
*s
, pa_queue
*q
) {
838 pa_sink_input
*i
, *n
;
841 pa_sink_assert_ref(s
);
842 pa_assert_ctl_context();
843 pa_assert(PA_SINK_IS_LINKED(s
->state
));
848 for (i
= PA_SINK_INPUT(pa_idxset_first(s
->inputs
, &idx
)); i
; i
= n
) {
849 n
= PA_SINK_INPUT(pa_idxset_next(s
->inputs
, &idx
));
851 pa_sink_input_ref(i
);
853 if (pa_sink_input_start_move(i
) >= 0)
856 pa_sink_input_unref(i
);
862 /* Called from main context */
/* Second half of a bulk move started by pa_sink_move_all_start(): pop each
 * sink input off the queue 'q', try to finish moving it to sink 's'
 * (optionally marking the routing as saved), fall back to fail_move on
 * error, and drop the reference taken when the input was queued. The queue
 * itself is freed at the end.
 * NOTE(review): the declaration of 'i' (original 864-865) was dropped from
 * this extraction. */
863 void pa_sink_move_all_finish(pa_sink
*s
, pa_queue
*q
, pa_bool_t save
) {
866 pa_sink_assert_ref(s
);
867 pa_assert_ctl_context();
868 pa_assert(PA_SINK_IS_LINKED(s
->state
));
871 while ((i
= PA_SINK_INPUT(pa_queue_pop(q
)))) {
872 if (pa_sink_input_finish_move(i
, s
, save
) < 0)
873 pa_sink_input_fail_move(i
);
875 pa_sink_input_unref(i
);
878 pa_queue_free(q
, NULL
, NULL
);
881 /* Called from main context */
/* Abort a bulk move: every sink input still queued in 'q' gets
 * fail_move'd (letting it handle the failed rerouting, e.g. by being
 * killed) and unreffed, then the queue is freed.
 * NOTE(review): the declaration of 'i' (original 883-884) was dropped from
 * this extraction. */
882 void pa_sink_move_all_fail(pa_queue
*q
) {
885 pa_assert_ctl_context();
888 while ((i
= PA_SINK_INPUT(pa_queue_pop(q
)))) {
889 pa_sink_input_fail_move(i
);
890 pa_sink_input_unref(i
);
893 pa_queue_free(q
, NULL
, NULL
);
896 /* Called from IO thread context */
897 void pa_sink_process_rewind(pa_sink
*s
, size_t nbytes
) {
901 pa_sink_assert_ref(s
);
902 pa_sink_assert_io_context(s
);
903 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
905 /* If nobody requested this and this is actually no real rewind
906 * then we can short cut this. Please note that this means that
907 * not all rewind requests triggered upstream will always be
908 * translated in actual requests! */
909 if (!s
->thread_info
.rewind_requested
&& nbytes
<= 0)
912 s
->thread_info
.rewind_nbytes
= 0;
913 s
->thread_info
.rewind_requested
= FALSE
;
915 if (s
->thread_info
.state
== PA_SINK_SUSPENDED
)
919 pa_log_debug("Processing rewind...");
920 if (s
->flags
& PA_SINK_SYNC_VOLUME
)
921 pa_sink_volume_change_rewind(s
, nbytes
);
924 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
) {
925 pa_sink_input_assert_ref(i
);
926 pa_sink_input_process_rewind(i
, nbytes
);
930 if (s
->monitor_source
&& PA_SOURCE_IS_LINKED(s
->monitor_source
->thread_info
.state
))
931 pa_source_process_rewind(s
->monitor_source
, nbytes
);
935 /* Called from IO thread context */
936 static unsigned fill_mix_info(pa_sink
*s
, size_t *length
, pa_mix_info
*info
, unsigned maxinfo
) {
940 size_t mixlength
= *length
;
942 pa_sink_assert_ref(s
);
943 pa_sink_assert_io_context(s
);
946 while ((i
= pa_hashmap_iterate(s
->thread_info
.inputs
, &state
, NULL
)) && maxinfo
> 0) {
947 pa_sink_input_assert_ref(i
);
949 pa_sink_input_peek(i
, *length
, &info
->chunk
, &info
->volume
);
951 if (mixlength
== 0 || info
->chunk
.length
< mixlength
)
952 mixlength
= info
->chunk
.length
;
954 if (pa_memblock_is_silence(info
->chunk
.memblock
)) {
955 pa_memblock_unref(info
->chunk
.memblock
);
959 info
->userdata
= pa_sink_input_ref(i
);
961 pa_assert(info
->chunk
.memblock
);
962 pa_assert(info
->chunk
.length
> 0);
975 /* Called from IO thread context */
976 static void inputs_drop(pa_sink
*s
, pa_mix_info
*info
, unsigned n
, pa_memchunk
*result
) {
980 unsigned n_unreffed
= 0;
982 pa_sink_assert_ref(s
);
983 pa_sink_assert_io_context(s
);
985 pa_assert(result
->memblock
);
986 pa_assert(result
->length
> 0);
988 /* We optimize for the case where the order of the inputs has not changed */
990 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
) {
992 pa_mix_info
* m
= NULL
;
994 pa_sink_input_assert_ref(i
);
996 /* Let's try to find the matching entry info the pa_mix_info array */
997 for (j
= 0; j
< n
; j
++) {
999 if (info
[p
].userdata
== i
) {
1009 /* Drop read data */
1010 pa_sink_input_drop(i
, result
->length
);
1012 if (s
->monitor_source
&& PA_SOURCE_IS_LINKED(s
->monitor_source
->thread_info
.state
)) {
1014 if (pa_hashmap_size(i
->thread_info
.direct_outputs
) > 0) {
1015 void *ostate
= NULL
;
1016 pa_source_output
*o
;
1019 if (m
&& m
->chunk
.memblock
) {
1021 pa_memblock_ref(c
.memblock
);
1022 pa_assert(result
->length
<= c
.length
);
1023 c
.length
= result
->length
;
1025 pa_memchunk_make_writable(&c
, 0);
1026 pa_volume_memchunk(&c
, &s
->sample_spec
, &m
->volume
);
1029 pa_memblock_ref(c
.memblock
);
1030 pa_assert(result
->length
<= c
.length
);
1031 c
.length
= result
->length
;
1034 while ((o
= pa_hashmap_iterate(i
->thread_info
.direct_outputs
, &ostate
, NULL
))) {
1035 pa_source_output_assert_ref(o
);
1036 pa_assert(o
->direct_on_input
== i
);
1037 pa_source_post_direct(s
->monitor_source
, o
, &c
);
1040 pa_memblock_unref(c
.memblock
);
1045 if (m
->chunk
.memblock
)
1046 pa_memblock_unref(m
->chunk
.memblock
);
1047 pa_memchunk_reset(&m
->chunk
);
1049 pa_sink_input_unref(m
->userdata
);
1056 /* Now drop references to entries that are included in the
1057 * pa_mix_info array but don't exist anymore */
1059 if (n_unreffed
< n
) {
1060 for (; n
> 0; info
++, n
--) {
1062 pa_sink_input_unref(info
->userdata
);
1063 if (info
->chunk
.memblock
)
1064 pa_memblock_unref(info
->chunk
.memblock
);
1068 if (s
->monitor_source
&& PA_SOURCE_IS_LINKED(s
->monitor_source
->thread_info
.state
))
1069 pa_source_post(s
->monitor_source
, result
);
1072 /* Called from IO thread context */
1073 void pa_sink_render(pa_sink
*s
, size_t length
, pa_memchunk
*result
) {
1074 pa_mix_info info
[MAX_MIX_CHANNELS
];
1076 size_t block_size_max
;
1078 pa_sink_assert_ref(s
);
1079 pa_sink_assert_io_context(s
);
1080 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
1081 pa_assert(pa_frame_aligned(length
, &s
->sample_spec
));
1084 pa_assert(!s
->thread_info
.rewind_requested
);
1085 pa_assert(s
->thread_info
.rewind_nbytes
== 0);
1087 if (s
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1088 result
->memblock
= pa_memblock_ref(s
->silence
.memblock
);
1089 result
->index
= s
->silence
.index
;
1090 result
->length
= PA_MIN(s
->silence
.length
, length
);
1097 length
= pa_frame_align(MIX_BUFFER_LENGTH
, &s
->sample_spec
);
1099 block_size_max
= pa_mempool_block_size_max(s
->core
->mempool
);
1100 if (length
> block_size_max
)
1101 length
= pa_frame_align(block_size_max
, &s
->sample_spec
);
1103 pa_assert(length
> 0);
1105 n
= fill_mix_info(s
, &length
, info
, MAX_MIX_CHANNELS
);
1109 *result
= s
->silence
;
1110 pa_memblock_ref(result
->memblock
);
1112 if (result
->length
> length
)
1113 result
->length
= length
;
1115 } else if (n
== 1) {
1118 *result
= info
[0].chunk
;
1119 pa_memblock_ref(result
->memblock
);
1121 if (result
->length
> length
)
1122 result
->length
= length
;
1124 pa_sw_cvolume_multiply(&volume
, &s
->thread_info
.soft_volume
, &info
[0].volume
);
1126 if (s
->thread_info
.soft_muted
|| pa_cvolume_is_muted(&volume
)) {
1127 pa_memblock_unref(result
->memblock
);
1128 pa_silence_memchunk_get(&s
->core
->silence_cache
,
1133 } else if (!pa_cvolume_is_norm(&volume
)) {
1134 pa_memchunk_make_writable(result
, 0);
1135 pa_volume_memchunk(result
, &s
->sample_spec
, &volume
);
1139 result
->memblock
= pa_memblock_new(s
->core
->mempool
, length
);
1141 ptr
= pa_memblock_acquire(result
->memblock
);
1142 result
->length
= pa_mix(info
, n
,
1145 &s
->thread_info
.soft_volume
,
1146 s
->thread_info
.soft_muted
);
1147 pa_memblock_release(result
->memblock
);
1152 inputs_drop(s
, info
, n
, result
);
1157 /* Called from IO thread context */
1158 void pa_sink_render_into(pa_sink
*s
, pa_memchunk
*target
) {
1159 pa_mix_info info
[MAX_MIX_CHANNELS
];
1161 size_t length
, block_size_max
;
1163 pa_sink_assert_ref(s
);
1164 pa_sink_assert_io_context(s
);
1165 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
1167 pa_assert(target
->memblock
);
1168 pa_assert(target
->length
> 0);
1169 pa_assert(pa_frame_aligned(target
->length
, &s
->sample_spec
));
1171 pa_assert(!s
->thread_info
.rewind_requested
);
1172 pa_assert(s
->thread_info
.rewind_nbytes
== 0);
1174 if (s
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1175 pa_silence_memchunk(target
, &s
->sample_spec
);
1181 length
= target
->length
;
1182 block_size_max
= pa_mempool_block_size_max(s
->core
->mempool
);
1183 if (length
> block_size_max
)
1184 length
= pa_frame_align(block_size_max
, &s
->sample_spec
);
1186 pa_assert(length
> 0);
1188 n
= fill_mix_info(s
, &length
, info
, MAX_MIX_CHANNELS
);
1191 if (target
->length
> length
)
1192 target
->length
= length
;
1194 pa_silence_memchunk(target
, &s
->sample_spec
);
1195 } else if (n
== 1) {
1198 if (target
->length
> length
)
1199 target
->length
= length
;
1201 pa_sw_cvolume_multiply(&volume
, &s
->thread_info
.soft_volume
, &info
[0].volume
);
1203 if (s
->thread_info
.soft_muted
|| pa_cvolume_is_muted(&volume
))
1204 pa_silence_memchunk(target
, &s
->sample_spec
);
1208 vchunk
= info
[0].chunk
;
1209 pa_memblock_ref(vchunk
.memblock
);
1211 if (vchunk
.length
> length
)
1212 vchunk
.length
= length
;
1214 if (!pa_cvolume_is_norm(&volume
)) {
1215 pa_memchunk_make_writable(&vchunk
, 0);
1216 pa_volume_memchunk(&vchunk
, &s
->sample_spec
, &volume
);
1219 pa_memchunk_memcpy(target
, &vchunk
);
1220 pa_memblock_unref(vchunk
.memblock
);
1226 ptr
= pa_memblock_acquire(target
->memblock
);
1228 target
->length
= pa_mix(info
, n
,
1229 (uint8_t*) ptr
+ target
->index
, length
,
1231 &s
->thread_info
.soft_volume
,
1232 s
->thread_info
.soft_muted
);
1234 pa_memblock_release(target
->memblock
);
1237 inputs_drop(s
, info
, n
, target
);
1242 /* Called from IO thread context */
1243 void pa_sink_render_into_full(pa_sink
*s
, pa_memchunk
*target
) {
1247 pa_sink_assert_ref(s
);
1248 pa_sink_assert_io_context(s
);
1249 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
1251 pa_assert(target
->memblock
);
1252 pa_assert(target
->length
> 0);
1253 pa_assert(pa_frame_aligned(target
->length
, &s
->sample_spec
));
1255 pa_assert(!s
->thread_info
.rewind_requested
);
1256 pa_assert(s
->thread_info
.rewind_nbytes
== 0);
1258 if (s
->thread_info
.state
== PA_SINK_SUSPENDED
) {
1259 pa_silence_memchunk(target
, &s
->sample_spec
);
1272 pa_sink_render_into(s
, &chunk
);
1281 /* Called from IO thread context */
1282 void pa_sink_render_full(pa_sink
*s
, size_t length
, pa_memchunk
*result
) {
1283 pa_sink_assert_ref(s
);
1284 pa_sink_assert_io_context(s
);
1285 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
1286 pa_assert(length
> 0);
1287 pa_assert(pa_frame_aligned(length
, &s
->sample_spec
));
1290 pa_assert(!s
->thread_info
.rewind_requested
);
1291 pa_assert(s
->thread_info
.rewind_nbytes
== 0);
1295 pa_sink_render(s
, length
, result
);
1297 if (result
->length
< length
) {
1300 pa_memchunk_make_writable(result
, length
);
1302 chunk
.memblock
= result
->memblock
;
1303 chunk
.index
= result
->index
+ result
->length
;
1304 chunk
.length
= length
- result
->length
;
1306 pa_sink_render_into_full(s
, &chunk
);
1308 result
->length
= length
;
1314 /* Called from main thread */
1315 pa_usec_t
pa_sink_get_latency(pa_sink
*s
) {
1318 pa_sink_assert_ref(s
);
1319 pa_assert_ctl_context();
1320 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1322 /* The returned value is supposed to be in the time domain of the sound card! */
1324 if (s
->state
== PA_SINK_SUSPENDED
)
1327 if (!(s
->flags
& PA_SINK_LATENCY
))
1330 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_LATENCY
, &usec
, 0, NULL
) == 0);
1335 /* Called from IO thread */
1336 pa_usec_t
pa_sink_get_latency_within_thread(pa_sink
*s
) {
1340 pa_sink_assert_ref(s
);
1341 pa_sink_assert_io_context(s
);
1342 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
1344 /* The returned value is supposed to be in the time domain of the sound card! */
1346 if (s
->thread_info
.state
== PA_SINK_SUSPENDED
)
1349 if (!(s
->flags
& PA_SINK_LATENCY
))
1352 o
= PA_MSGOBJECT(s
);
1354 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1356 if (o
->process_msg(o
, PA_SINK_MESSAGE_GET_LATENCY
, &usec
, 0, NULL
) < 0)
1362 /* Called from the main thread (and also from the IO thread while the main
1363 * thread is waiting).
1365 * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
1366 * set. Instead, flat volume mode is detected by checking whether the root sink
1367 * has the flag set. */
1368 pa_bool_t
pa_sink_flat_volume_enabled(pa_sink
*s
) {
1369 pa_sink_assert_ref(s
);
1371 while (s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)
1372 s
= s
->input_to_master
->sink
;
1374 return (s
->flags
& PA_SINK_FLAT_VOLUME
);
1377 /* Called from main context */
1378 pa_bool_t
pa_sink_is_passthrough(pa_sink
*s
) {
1379 pa_sink_input
*alt_i
;
1382 pa_sink_assert_ref(s
);
1384 /* one and only one PASSTHROUGH input can possibly be connected */
1385 if (pa_idxset_size(s
->inputs
) == 1) {
1386 alt_i
= pa_idxset_first(s
->inputs
, &idx
);
1388 if (pa_sink_input_is_passthrough(alt_i
))
1395 /* Called from main context. */
1396 static void compute_reference_ratio(pa_sink_input
*i
) {
1398 pa_cvolume remapped
;
1401 pa_assert(pa_sink_flat_volume_enabled(i
->sink
));
1404 * Calculates the reference ratio from the sink's reference
1405 * volume. This basically calculates:
1407 * i->reference_ratio = i->volume / i->sink->reference_volume
1410 remapped
= i
->sink
->reference_volume
;
1411 pa_cvolume_remap(&remapped
, &i
->sink
->channel_map
, &i
->channel_map
);
1413 i
->reference_ratio
.channels
= i
->sample_spec
.channels
;
1415 for (c
= 0; c
< i
->sample_spec
.channels
; c
++) {
1417 /* We don't update when the sink volume is 0 anyway */
1418 if (remapped
.values
[c
] <= PA_VOLUME_MUTED
)
1421 /* Don't update the reference ratio unless necessary */
1422 if (pa_sw_volume_multiply(
1423 i
->reference_ratio
.values
[c
],
1424 remapped
.values
[c
]) == i
->volume
.values
[c
])
1427 i
->reference_ratio
.values
[c
] = pa_sw_volume_divide(
1428 i
->volume
.values
[c
],
1429 remapped
.values
[c
]);
1433 /* Called from main context. Only called for the root sink in volume sharing
1434 * cases, except for internal recursive calls. */
1435 static void compute_reference_ratios(pa_sink
*s
) {
1439 pa_sink_assert_ref(s
);
1440 pa_assert_ctl_context();
1441 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1442 pa_assert(pa_sink_flat_volume_enabled(s
));
1444 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1445 compute_reference_ratio(i
);
1447 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
))
1448 compute_reference_ratios(i
->origin_sink
);
1452 /* Called from main context. Only called for the root sink in volume sharing
1453 * cases, except for internal recursive calls. */
1454 static void compute_real_ratios(pa_sink
*s
) {
1458 pa_sink_assert_ref(s
);
1459 pa_assert_ctl_context();
1460 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1461 pa_assert(pa_sink_flat_volume_enabled(s
));
1463 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1465 pa_cvolume remapped
;
1467 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)) {
1468 /* The origin sink uses volume sharing, so this input's real ratio
1469 * is handled as a special case - the real ratio must be 0 dB, and
1470 * as a result i->soft_volume must equal i->volume_factor. */
1471 pa_cvolume_reset(&i
->real_ratio
, i
->real_ratio
.channels
);
1472 i
->soft_volume
= i
->volume_factor
;
1474 compute_real_ratios(i
->origin_sink
);
1480 * This basically calculates:
1482 * i->real_ratio := i->volume / s->real_volume
1483 * i->soft_volume := i->real_ratio * i->volume_factor
1486 remapped
= s
->real_volume
;
1487 pa_cvolume_remap(&remapped
, &s
->channel_map
, &i
->channel_map
);
1489 i
->real_ratio
.channels
= i
->sample_spec
.channels
;
1490 i
->soft_volume
.channels
= i
->sample_spec
.channels
;
1492 for (c
= 0; c
< i
->sample_spec
.channels
; c
++) {
1494 if (remapped
.values
[c
] <= PA_VOLUME_MUTED
) {
1495 /* We leave i->real_ratio untouched */
1496 i
->soft_volume
.values
[c
] = PA_VOLUME_MUTED
;
1500 /* Don't lose accuracy unless necessary */
1501 if (pa_sw_volume_multiply(
1502 i
->real_ratio
.values
[c
],
1503 remapped
.values
[c
]) != i
->volume
.values
[c
])
1505 i
->real_ratio
.values
[c
] = pa_sw_volume_divide(
1506 i
->volume
.values
[c
],
1507 remapped
.values
[c
]);
1509 i
->soft_volume
.values
[c
] = pa_sw_volume_multiply(
1510 i
->real_ratio
.values
[c
],
1511 i
->volume_factor
.values
[c
]);
1514 /* We don't copy the soft_volume to the thread_info data
1515 * here. That must be done by the caller */
1519 static pa_cvolume
*cvolume_remap_minimal_impact(
1521 const pa_cvolume
*template,
1522 const pa_channel_map
*from
,
1523 const pa_channel_map
*to
) {
1528 pa_assert(template);
1531 pa_assert(pa_cvolume_compatible_with_channel_map(v
, from
));
1532 pa_assert(pa_cvolume_compatible_with_channel_map(template, to
));
1534 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1535 * mapping from sink input to sink volumes:
1537 * If template is a possible remapping from v it is used instead
1538 * of remapping anew.
1540 * If the channel maps don't match we set an all-channel volume on
1541 * the sink to ensure that changing a volume on one stream has no
1542 * effect that cannot be compensated for in another stream that
1543 * does not have the same channel map as the sink. */
1545 if (pa_channel_map_equal(from
, to
))
1549 if (pa_cvolume_equal(pa_cvolume_remap(&t
, to
, from
), v
)) {
1554 pa_cvolume_set(v
, to
->channels
, pa_cvolume_max(v
));
1558 /* Called from main thread. Only called for the root sink in volume sharing
1559 * cases, except for internal recursive calls. */
1560 static void get_maximum_input_volume(pa_sink
*s
, pa_cvolume
*max_volume
, const pa_channel_map
*channel_map
) {
1564 pa_sink_assert_ref(s
);
1565 pa_assert(max_volume
);
1566 pa_assert(channel_map
);
1567 pa_assert(pa_sink_flat_volume_enabled(s
));
1569 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1570 pa_cvolume remapped
;
1572 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)) {
1573 get_maximum_input_volume(i
->origin_sink
, max_volume
, channel_map
);
1575 /* Ignore this input. The origin sink uses volume sharing, so this
1576 * input's volume will be set to be equal to the root sink's real
1577 * volume. Obviously this input's current volume must not then
1578 * affect what the root sink's real volume will be. */
1582 remapped
= i
->volume
;
1583 cvolume_remap_minimal_impact(&remapped
, max_volume
, &i
->channel_map
, channel_map
);
1584 pa_cvolume_merge(max_volume
, max_volume
, &remapped
);
1588 /* Called from main thread. Only called for the root sink in volume sharing
1589 * cases, except for internal recursive calls. */
1590 static pa_bool_t
has_inputs(pa_sink
*s
) {
1594 pa_sink_assert_ref(s
);
1596 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1597 if (!i
->origin_sink
|| !(i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
) || has_inputs(i
->origin_sink
))
1604 /* Called from main thread. Only called for the root sink in volume sharing
1605 * cases, except for internal recursive calls. */
1606 static void update_real_volume(pa_sink
*s
, const pa_cvolume
*new_volume
, pa_channel_map
*channel_map
) {
1610 pa_sink_assert_ref(s
);
1611 pa_assert(new_volume
);
1612 pa_assert(channel_map
);
1614 s
->real_volume
= *new_volume
;
1615 pa_cvolume_remap(&s
->real_volume
, channel_map
, &s
->channel_map
);
1617 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1618 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)) {
1619 if (pa_sink_flat_volume_enabled(s
)) {
1620 pa_cvolume old_volume
= i
->volume
;
1622 /* Follow the root sink's real volume. */
1623 i
->volume
= *new_volume
;
1624 pa_cvolume_remap(&i
->volume
, channel_map
, &i
->channel_map
);
1625 compute_reference_ratio(i
);
1627 /* The volume changed, let's tell people so */
1628 if (!pa_cvolume_equal(&old_volume
, &i
->volume
)) {
1629 if (i
->volume_changed
)
1630 i
->volume_changed(i
);
1632 pa_subscription_post(i
->core
, PA_SUBSCRIPTION_EVENT_SINK_INPUT
|PA_SUBSCRIPTION_EVENT_CHANGE
, i
->index
);
1636 update_real_volume(i
->origin_sink
, new_volume
, channel_map
);
1641 /* Called from main thread. Only called for the root sink in shared volume
1643 static void compute_real_volume(pa_sink
*s
) {
1644 pa_sink_assert_ref(s
);
1645 pa_assert_ctl_context();
1646 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1647 pa_assert(pa_sink_flat_volume_enabled(s
));
1648 pa_assert(!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
));
1650 /* This determines the maximum volume of all streams and sets
1651 * s->real_volume accordingly. */
1653 if (!has_inputs(s
)) {
1654 /* In the special case that we have no sink inputs we leave the
1655 * volume unmodified. */
1656 update_real_volume(s
, &s
->reference_volume
, &s
->channel_map
);
1660 pa_cvolume_mute(&s
->real_volume
, s
->channel_map
.channels
);
1662 /* First let's determine the new maximum volume of all inputs
1663 * connected to this sink */
1664 get_maximum_input_volume(s
, &s
->real_volume
, &s
->channel_map
);
1665 update_real_volume(s
, &s
->real_volume
, &s
->channel_map
);
1667 /* Then, let's update the real ratios/soft volumes of all inputs
1668 * connected to this sink */
1669 compute_real_ratios(s
);
1672 /* Called from main thread. Only called for the root sink in shared volume
1673 * cases, except for internal recursive calls. */
1674 static void propagate_reference_volume(pa_sink
*s
) {
1678 pa_sink_assert_ref(s
);
1679 pa_assert_ctl_context();
1680 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1681 pa_assert(pa_sink_flat_volume_enabled(s
));
1683 /* This is called whenever the sink volume changes that is not
1684 * caused by a sink input volume change. We need to fix up the
1685 * sink input volumes accordingly */
1687 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1688 pa_cvolume old_volume
;
1690 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)) {
1691 propagate_reference_volume(i
->origin_sink
);
1693 /* Since the origin sink uses volume sharing, this input's volume
1694 * needs to be updated to match the root sink's real volume, but
1695 * that will be done later in update_shared_real_volume(). */
1699 old_volume
= i
->volume
;
1701 /* This basically calculates:
1703 * i->volume := s->reference_volume * i->reference_ratio */
1705 i
->volume
= s
->reference_volume
;
1706 pa_cvolume_remap(&i
->volume
, &s
->channel_map
, &i
->channel_map
);
1707 pa_sw_cvolume_multiply(&i
->volume
, &i
->volume
, &i
->reference_ratio
);
1709 /* The volume changed, let's tell people so */
1710 if (!pa_cvolume_equal(&old_volume
, &i
->volume
)) {
1712 if (i
->volume_changed
)
1713 i
->volume_changed(i
);
1715 pa_subscription_post(i
->core
, PA_SUBSCRIPTION_EVENT_SINK_INPUT
|PA_SUBSCRIPTION_EVENT_CHANGE
, i
->index
);
1720 /* Called from main thread. Only called for the root sink in volume sharing
1721 * cases, except for internal recursive calls. The return value indicates
1722 * whether any reference volume actually changed. */
1723 static pa_bool_t
update_reference_volume(pa_sink
*s
, const pa_cvolume
*v
, const pa_channel_map
*channel_map
, pa_bool_t save
) {
1725 pa_bool_t reference_volume_changed
;
1729 pa_sink_assert_ref(s
);
1730 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1732 pa_assert(channel_map
);
1733 pa_assert(pa_cvolume_valid(v
));
1736 pa_cvolume_remap(&volume
, channel_map
, &s
->channel_map
);
1738 reference_volume_changed
= !pa_cvolume_equal(&volume
, &s
->reference_volume
);
1739 s
->reference_volume
= volume
;
1741 s
->save_volume
= (!reference_volume_changed
&& s
->save_volume
) || save
;
1743 if (reference_volume_changed
)
1744 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
1745 else if (!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
))
1746 /* If the root sink's volume doesn't change, then there can't be any
1747 * changes in the other sinks in the sink tree either.
1749 * It's probably theoretically possible that even if the root sink's
1750 * volume changes slightly, some filter sink doesn't change its volume
1751 * due to rounding errors. If that happens, we still want to propagate
1752 * the changed root sink volume to the sinks connected to the
1753 * intermediate sink that didn't change its volume. This theoretical
1754 * possiblity is the reason why we have that !(s->flags &
1755 * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
1756 * notice even if we returned here FALSE always if
1757 * reference_volume_changed is FALSE. */
1760 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1761 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
))
1762 update_reference_volume(i
->origin_sink
, v
, channel_map
, FALSE
);
1768 /* Called from main thread */
1769 void pa_sink_set_volume(
1771 const pa_cvolume
*volume
,
1775 pa_cvolume new_reference_volume
;
1776 pa_sink
*root_sink
= s
;
1778 pa_sink_assert_ref(s
);
1779 pa_assert_ctl_context();
1780 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1781 pa_assert(!volume
|| pa_cvolume_valid(volume
));
1782 pa_assert(volume
|| pa_sink_flat_volume_enabled(s
));
1783 pa_assert(!volume
|| volume
->channels
== 1 || pa_cvolume_compatible(volume
, &s
->sample_spec
));
1785 /* make sure we don't change the volume when a PASSTHROUGH input is connected */
1786 if (pa_sink_is_passthrough(s
)) {
1787 /* FIXME: Need to notify client that volume control is disabled */
1788 pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
1792 /* In case of volume sharing, the volume is set for the root sink first,
1793 * from which it's then propagated to the sharing sinks. */
1794 while (root_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)
1795 root_sink
= root_sink
->input_to_master
->sink
;
1797 /* As a special exception we accept mono volumes on all sinks --
1798 * even on those with more complex channel maps */
1801 if (pa_cvolume_compatible(volume
, &s
->sample_spec
))
1802 new_reference_volume
= *volume
;
1804 new_reference_volume
= s
->reference_volume
;
1805 pa_cvolume_scale(&new_reference_volume
, pa_cvolume_max(volume
));
1808 pa_cvolume_remap(&new_reference_volume
, &s
->channel_map
, &root_sink
->channel_map
);
1811 /* If volume is NULL we synchronize the sink's real and reference
1812 * volumes with the stream volumes. If it is not NULL we update
1813 * the reference_volume with it. */
1816 if (update_reference_volume(root_sink
, &new_reference_volume
, &root_sink
->channel_map
, save
)) {
1817 if (pa_sink_flat_volume_enabled(root_sink
)) {
1818 /* OK, propagate this volume change back to the inputs */
1819 propagate_reference_volume(root_sink
);
1821 /* And now recalculate the real volume */
1822 compute_real_volume(root_sink
);
1824 update_real_volume(root_sink
, &root_sink
->reference_volume
, &root_sink
->channel_map
);
1828 pa_assert(pa_sink_flat_volume_enabled(root_sink
));
1830 /* Ok, let's determine the new real volume */
1831 compute_real_volume(root_sink
);
1833 /* Let's 'push' the reference volume if necessary */
1834 pa_cvolume_merge(&new_reference_volume
, &s
->reference_volume
, &root_sink
->real_volume
);
1835 update_reference_volume(root_sink
, &new_reference_volume
, &root_sink
->channel_map
, save
);
1837 /* Now that the reference volume is updated, we can update the streams'
1838 * reference ratios. */
1839 compute_reference_ratios(root_sink
);
1842 if (root_sink
->set_volume
) {
1843 /* If we have a function set_volume(), then we do not apply a
1844 * soft volume by default. However, set_volume() is free to
1845 * apply one to root_sink->soft_volume */
1847 pa_cvolume_reset(&root_sink
->soft_volume
, root_sink
->sample_spec
.channels
);
1848 if (!(root_sink
->flags
& PA_SINK_SYNC_VOLUME
))
1849 root_sink
->set_volume(root_sink
);
1852 /* If we have no function set_volume(), then the soft volume
1853 * becomes the real volume */
1854 root_sink
->soft_volume
= root_sink
->real_volume
;
1856 /* This tells the sink that soft volume and/or real volume changed */
1858 pa_assert_se(pa_asyncmsgq_send(root_sink
->asyncmsgq
, PA_MSGOBJECT(root_sink
), PA_SINK_MESSAGE_SET_SHARED_VOLUME
, NULL
, 0, NULL
) == 0);
1861 /* Called from the io thread if sync volume is used, otherwise from the main thread.
1862 * Only to be called by sink implementor */
1863 void pa_sink_set_soft_volume(pa_sink
*s
, const pa_cvolume
*volume
) {
1865 pa_sink_assert_ref(s
);
1866 pa_assert(!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
));
1868 if (s
->flags
& PA_SINK_SYNC_VOLUME
)
1869 pa_sink_assert_io_context(s
);
1871 pa_assert_ctl_context();
1874 pa_cvolume_reset(&s
->soft_volume
, s
->sample_spec
.channels
);
1876 s
->soft_volume
= *volume
;
1878 if (PA_SINK_IS_LINKED(s
->state
) && !(s
->flags
& PA_SINK_SYNC_VOLUME
))
1879 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_VOLUME
, NULL
, 0, NULL
) == 0);
1881 s
->thread_info
.soft_volume
= s
->soft_volume
;
1884 /* Called from the main thread. Only called for the root sink in volume sharing
1885 * cases, except for internal recursive calls. */
1886 static void propagate_real_volume(pa_sink
*s
, const pa_cvolume
*old_real_volume
) {
1890 pa_sink_assert_ref(s
);
1891 pa_assert(old_real_volume
);
1892 pa_assert_ctl_context();
1893 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1895 /* This is called when the hardware's real volume changes due to
1896 * some external event. We copy the real volume into our
1897 * reference volume and then rebuild the stream volumes based on
1898 * i->real_ratio which should stay fixed. */
1900 if (!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)) {
1901 if (pa_cvolume_equal(old_real_volume
, &s
->real_volume
))
1904 /* 1. Make the real volume the reference volume */
1905 update_reference_volume(s
, &s
->real_volume
, &s
->channel_map
, TRUE
);
1908 if (pa_sink_flat_volume_enabled(s
)) {
1910 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
1911 pa_cvolume old_volume
= i
->volume
;
1913 /* 2. Since the sink's reference and real volumes are equal
1914 * now our ratios should be too. */
1915 i
->reference_ratio
= i
->real_ratio
;
1917 /* 3. Recalculate the new stream reference volume based on the
1918 * reference ratio and the sink's reference volume.
1920 * This basically calculates:
1922 * i->volume = s->reference_volume * i->reference_ratio
1924 * This is identical to propagate_reference_volume() */
1925 i
->volume
= s
->reference_volume
;
1926 pa_cvolume_remap(&i
->volume
, &s
->channel_map
, &i
->channel_map
);
1927 pa_sw_cvolume_multiply(&i
->volume
, &i
->volume
, &i
->reference_ratio
);
1929 /* Notify if something changed */
1930 if (!pa_cvolume_equal(&old_volume
, &i
->volume
)) {
1932 if (i
->volume_changed
)
1933 i
->volume_changed(i
);
1935 pa_subscription_post(i
->core
, PA_SUBSCRIPTION_EVENT_SINK_INPUT
|PA_SUBSCRIPTION_EVENT_CHANGE
, i
->index
);
1938 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
))
1939 propagate_real_volume(i
->origin_sink
, old_real_volume
);
1943 /* Something got changed in the hardware. It probably makes sense
1944 * to save changed hw settings given that hw volume changes not
1945 * triggered by PA are almost certainly done by the user. */
1946 if (!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
))
1947 s
->save_volume
= TRUE
;
1950 /* Called from io thread */
1951 void pa_sink_update_volume_and_mute(pa_sink
*s
) {
1953 pa_sink_assert_io_context(s
);
1955 pa_asyncmsgq_post(pa_thread_mq_get()->outq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE
, NULL
, 0, NULL
, NULL
);
1958 /* Called from main thread */
1959 const pa_cvolume
*pa_sink_get_volume(pa_sink
*s
, pa_bool_t force_refresh
) {
1960 pa_sink_assert_ref(s
);
1961 pa_assert_ctl_context();
1962 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1964 if (s
->refresh_volume
|| force_refresh
) {
1965 struct pa_cvolume old_real_volume
;
1967 pa_assert(!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
));
1969 old_real_volume
= s
->real_volume
;
1971 if (!(s
->flags
& PA_SINK_SYNC_VOLUME
) && s
->get_volume
)
1974 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_VOLUME
, NULL
, 0, NULL
) == 0);
1976 update_real_volume(s
, &s
->real_volume
, &s
->channel_map
);
1977 propagate_real_volume(s
, &old_real_volume
);
1980 return &s
->reference_volume
;
1983 /* Called from main thread. In volume sharing cases, only the root sink may
1985 void pa_sink_volume_changed(pa_sink
*s
, const pa_cvolume
*new_real_volume
) {
1986 pa_cvolume old_real_volume
;
1988 pa_sink_assert_ref(s
);
1989 pa_assert_ctl_context();
1990 pa_assert(PA_SINK_IS_LINKED(s
->state
));
1991 pa_assert(!(s
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
));
1993 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1995 old_real_volume
= s
->real_volume
;
1996 update_real_volume(s
, new_real_volume
, &s
->channel_map
);
1997 propagate_real_volume(s
, &old_real_volume
);
2000 /* Called from main thread */
2001 void pa_sink_set_mute(pa_sink
*s
, pa_bool_t mute
, pa_bool_t save
) {
2002 pa_bool_t old_muted
;
2004 pa_sink_assert_ref(s
);
2005 pa_assert_ctl_context();
2006 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2008 old_muted
= s
->muted
;
2010 s
->save_muted
= (old_muted
== s
->muted
&& s
->save_muted
) || save
;
2012 if (!(s
->flags
& PA_SINK_SYNC_VOLUME
) && s
->set_mute
)
2015 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_MUTE
, NULL
, 0, NULL
) == 0);
2017 if (old_muted
!= s
->muted
)
2018 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
2021 /* Called from main thread */
2022 pa_bool_t
pa_sink_get_mute(pa_sink
*s
, pa_bool_t force_refresh
) {
2024 pa_sink_assert_ref(s
);
2025 pa_assert_ctl_context();
2026 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2028 if (s
->refresh_muted
|| force_refresh
) {
2029 pa_bool_t old_muted
= s
->muted
;
2031 if (!(s
->flags
& PA_SINK_SYNC_VOLUME
) && s
->get_mute
)
2034 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_MUTE
, NULL
, 0, NULL
) == 0);
2036 if (old_muted
!= s
->muted
) {
2037 s
->save_muted
= TRUE
;
2039 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
2041 /* Make sure the soft mute status stays in sync */
2042 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_MUTE
, NULL
, 0, NULL
) == 0);
2049 /* Called from main thread */
2050 void pa_sink_mute_changed(pa_sink
*s
, pa_bool_t new_muted
) {
2051 pa_sink_assert_ref(s
);
2052 pa_assert_ctl_context();
2053 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2055 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
2057 if (s
->muted
== new_muted
)
2060 s
->muted
= new_muted
;
2061 s
->save_muted
= TRUE
;
2063 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
2066 /* Called from main thread */
2067 pa_bool_t
pa_sink_update_proplist(pa_sink
*s
, pa_update_mode_t mode
, pa_proplist
*p
) {
2068 pa_sink_assert_ref(s
);
2069 pa_assert_ctl_context();
2072 pa_proplist_update(s
->proplist
, mode
, p
);
2074 if (PA_SINK_IS_LINKED(s
->state
)) {
2075 pa_hook_fire(&s
->core
->hooks
[PA_CORE_HOOK_SINK_PROPLIST_CHANGED
], s
);
2076 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
2082 /* Called from main thread */
2083 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
2084 void pa_sink_set_description(pa_sink
*s
, const char *description
) {
2086 pa_sink_assert_ref(s
);
2087 pa_assert_ctl_context();
2089 if (!description
&& !pa_proplist_contains(s
->proplist
, PA_PROP_DEVICE_DESCRIPTION
))
2092 old
= pa_proplist_gets(s
->proplist
, PA_PROP_DEVICE_DESCRIPTION
);
2094 if (old
&& description
&& pa_streq(old
, description
))
2098 pa_proplist_sets(s
->proplist
, PA_PROP_DEVICE_DESCRIPTION
, description
);
2100 pa_proplist_unset(s
->proplist
, PA_PROP_DEVICE_DESCRIPTION
);
2102 if (s
->monitor_source
) {
2105 n
= pa_sprintf_malloc("Monitor Source of %s", description
? description
: s
->name
);
2106 pa_source_set_description(s
->monitor_source
, n
);
2110 if (PA_SINK_IS_LINKED(s
->state
)) {
2111 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
2112 pa_hook_fire(&s
->core
->hooks
[PA_CORE_HOOK_SINK_PROPLIST_CHANGED
], s
);
2116 /* Called from main thread */
2117 unsigned pa_sink_linked_by(pa_sink
*s
) {
2120 pa_sink_assert_ref(s
);
2121 pa_assert_ctl_context();
2122 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2124 ret
= pa_idxset_size(s
->inputs
);
2126 /* We add in the number of streams connected to us here. Please
2127 * note the asymmmetry to pa_sink_used_by()! */
2129 if (s
->monitor_source
)
2130 ret
+= pa_source_linked_by(s
->monitor_source
);
2135 /* Called from main thread */
2136 unsigned pa_sink_used_by(pa_sink
*s
) {
2139 pa_sink_assert_ref(s
);
2140 pa_assert_ctl_context();
2141 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2143 ret
= pa_idxset_size(s
->inputs
);
2144 pa_assert(ret
>= s
->n_corked
);
2146 /* Streams connected to our monitor source do not matter for
2147 * pa_sink_used_by()!.*/
2149 return ret
- s
->n_corked
;
2152 /* Called from main thread */
2153 unsigned pa_sink_check_suspend(pa_sink
*s
) {
2158 pa_sink_assert_ref(s
);
2159 pa_assert_ctl_context();
2161 if (!PA_SINK_IS_LINKED(s
->state
))
2166 PA_IDXSET_FOREACH(i
, s
->inputs
, idx
) {
2167 pa_sink_input_state_t st
;
2169 st
= pa_sink_input_get_state(i
);
2171 /* We do not assert here. It is perfectly valid for a sink input to
2172 * be in the INIT state (i.e. created, marked done but not yet put)
2173 * and we should not care if it's unlinked as it won't contribute
2174 * towarards our busy status.
2176 if (!PA_SINK_INPUT_IS_LINKED(st
))
2179 if (st
== PA_SINK_INPUT_CORKED
)
2182 if (i
->flags
& PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND
)
2188 if (s
->monitor_source
)
2189 ret
+= pa_source_check_suspend(s
->monitor_source
);
2194 /* Called from the IO thread */
2195 static void sync_input_volumes_within_thread(pa_sink
*s
) {
2199 pa_sink_assert_ref(s
);
2200 pa_sink_assert_io_context(s
);
2202 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
) {
2203 if (pa_cvolume_equal(&i
->thread_info
.soft_volume
, &i
->soft_volume
))
2206 i
->thread_info
.soft_volume
= i
->soft_volume
;
2207 pa_sink_input_request_rewind(i
, 0, TRUE
, FALSE
, FALSE
);
2211 /* Called from the IO thread. Only called for the root sink in volume sharing
2212 * cases, except for internal recursive calls. */
2213 static void set_shared_volume_within_thread(pa_sink
*s
) {
2214 pa_sink_input
*i
= NULL
;
2217 pa_sink_assert_ref(s
);
2219 PA_MSGOBJECT(s
)->process_msg(PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_VOLUME_SYNCED
, NULL
, 0, NULL
);
2221 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
) {
2222 if (i
->origin_sink
&& (i
->origin_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
))
2223 set_shared_volume_within_thread(i
->origin_sink
);
2227 /* Called from IO thread, except when it is not */
2228 int pa_sink_process_msg(pa_msgobject
*o
, int code
, void *userdata
, int64_t offset
, pa_memchunk
*chunk
) {
2229 pa_sink
*s
= PA_SINK(o
);
2230 pa_sink_assert_ref(s
);
2232 switch ((pa_sink_message_t
) code
) {
2234 case PA_SINK_MESSAGE_ADD_INPUT
: {
2235 pa_sink_input
*i
= PA_SINK_INPUT(userdata
);
2237 /* If you change anything here, make sure to change the
2238 * sink input handling a few lines down at
2239 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
2241 pa_hashmap_put(s
->thread_info
.inputs
, PA_UINT32_TO_PTR(i
->index
), pa_sink_input_ref(i
));
2243 /* Since the caller sleeps in pa_sink_input_put(), we can
2244 * safely access data outside of thread_info even though
2247 if ((i
->thread_info
.sync_prev
= i
->sync_prev
)) {
2248 pa_assert(i
->sink
== i
->thread_info
.sync_prev
->sink
);
2249 pa_assert(i
->sync_prev
->sync_next
== i
);
2250 i
->thread_info
.sync_prev
->thread_info
.sync_next
= i
;
2253 if ((i
->thread_info
.sync_next
= i
->sync_next
)) {
2254 pa_assert(i
->sink
== i
->thread_info
.sync_next
->sink
);
2255 pa_assert(i
->sync_next
->sync_prev
== i
);
2256 i
->thread_info
.sync_next
->thread_info
.sync_prev
= i
;
2259 pa_assert(!i
->thread_info
.attached
);
2260 i
->thread_info
.attached
= TRUE
;
2265 pa_sink_input_set_state_within_thread(i
, i
->state
);
2267 /* The requested latency of the sink input needs to be
2268 * fixed up and then configured on the sink */
2270 if (i
->thread_info
.requested_sink_latency
!= (pa_usec_t
) -1)
2271 pa_sink_input_set_requested_latency_within_thread(i
, i
->thread_info
.requested_sink_latency
);
2273 pa_sink_input_update_max_rewind(i
, s
->thread_info
.max_rewind
);
2274 pa_sink_input_update_max_request(i
, s
->thread_info
.max_request
);
2276 /* We don't rewind here automatically. This is left to the
2277 * sink input implementor because some sink inputs need a
2278 * slow start, i.e. need some time to buffer client
2279 * samples before beginning streaming. */
2281 /* In flat volume mode we need to update the volume as
2283 return o
->process_msg(o
, PA_SINK_MESSAGE_SET_SHARED_VOLUME
, NULL
, 0, NULL
);
2286 case PA_SINK_MESSAGE_REMOVE_INPUT
: {
2287 pa_sink_input
*i
= PA_SINK_INPUT(userdata
);
2289 /* If you change anything here, make sure to change the
2290 * sink input handling a few lines down at
2291 * PA_SINK_MESSAGE_START_MOVE, too. */
2296 pa_sink_input_set_state_within_thread(i
, i
->state
);
2298 pa_assert(i
->thread_info
.attached
);
2299 i
->thread_info
.attached
= FALSE
;
2301 /* Since the caller sleeps in pa_sink_input_unlink(),
2302 * we can safely access data outside of thread_info even
2303 * though it is mutable */
2305 pa_assert(!i
->sync_prev
);
2306 pa_assert(!i
->sync_next
);
2308 if (i
->thread_info
.sync_prev
) {
2309 i
->thread_info
.sync_prev
->thread_info
.sync_next
= i
->thread_info
.sync_prev
->sync_next
;
2310 i
->thread_info
.sync_prev
= NULL
;
2313 if (i
->thread_info
.sync_next
) {
2314 i
->thread_info
.sync_next
->thread_info
.sync_prev
= i
->thread_info
.sync_next
->sync_prev
;
2315 i
->thread_info
.sync_next
= NULL
;
2318 if (pa_hashmap_remove(s
->thread_info
.inputs
, PA_UINT32_TO_PTR(i
->index
)))
2319 pa_sink_input_unref(i
);
2321 pa_sink_invalidate_requested_latency(s
, TRUE
);
2322 pa_sink_request_rewind(s
, (size_t) -1);
2324 /* In flat volume mode we need to update the volume as
2326 return o
->process_msg(o
, PA_SINK_MESSAGE_SET_SHARED_VOLUME
, NULL
, 0, NULL
);
2329 case PA_SINK_MESSAGE_START_MOVE
: {
2330 pa_sink_input
*i
= PA_SINK_INPUT(userdata
);
2332 /* We don't support moving synchronized streams. */
2333 pa_assert(!i
->sync_prev
);
2334 pa_assert(!i
->sync_next
);
2335 pa_assert(!i
->thread_info
.sync_next
);
2336 pa_assert(!i
->thread_info
.sync_prev
);
2338 if (i
->thread_info
.state
!= PA_SINK_INPUT_CORKED
) {
2340 size_t sink_nbytes
, total_nbytes
;
2342 /* Get the latency of the sink */
2343 usec
= pa_sink_get_latency_within_thread(s
);
2344 sink_nbytes
= pa_usec_to_bytes(usec
, &s
->sample_spec
);
2345 total_nbytes
= sink_nbytes
+ pa_memblockq_get_length(i
->thread_info
.render_memblockq
);
2347 if (total_nbytes
> 0) {
2348 i
->thread_info
.rewrite_nbytes
= i
->thread_info
.resampler
? pa_resampler_request(i
->thread_info
.resampler
, total_nbytes
) : total_nbytes
;
2349 i
->thread_info
.rewrite_flush
= TRUE
;
2350 pa_sink_input_process_rewind(i
, sink_nbytes
);
2357 pa_assert(i
->thread_info
.attached
);
2358 i
->thread_info
.attached
= FALSE
;
2360 /* Let's remove the sink input ...*/
2361 if (pa_hashmap_remove(s
->thread_info
.inputs
, PA_UINT32_TO_PTR(i
->index
)))
2362 pa_sink_input_unref(i
);
2364 pa_sink_invalidate_requested_latency(s
, TRUE
);
2366 pa_log_debug("Requesting rewind due to started move");
2367 pa_sink_request_rewind(s
, (size_t) -1);
2369 /* In flat volume mode we need to update the volume as
2371 return o
->process_msg(o
, PA_SINK_MESSAGE_SET_SHARED_VOLUME
, NULL
, 0, NULL
);
2374 case PA_SINK_MESSAGE_FINISH_MOVE
: {
2375 pa_sink_input
*i
= PA_SINK_INPUT(userdata
);
2377 /* We don't support moving synchronized streams. */
2378 pa_assert(!i
->sync_prev
);
2379 pa_assert(!i
->sync_next
);
2380 pa_assert(!i
->thread_info
.sync_next
);
2381 pa_assert(!i
->thread_info
.sync_prev
);
2383 pa_hashmap_put(s
->thread_info
.inputs
, PA_UINT32_TO_PTR(i
->index
), pa_sink_input_ref(i
));
2385 pa_assert(!i
->thread_info
.attached
);
2386 i
->thread_info
.attached
= TRUE
;
2391 if (i
->thread_info
.requested_sink_latency
!= (pa_usec_t
) -1)
2392 pa_sink_input_set_requested_latency_within_thread(i
, i
->thread_info
.requested_sink_latency
);
2394 pa_sink_input_update_max_rewind(i
, s
->thread_info
.max_rewind
);
2395 pa_sink_input_update_max_request(i
, s
->thread_info
.max_request
);
2397 if (i
->thread_info
.state
!= PA_SINK_INPUT_CORKED
) {
2401 /* Get the latency of the sink */
2402 usec
= pa_sink_get_latency_within_thread(s
);
2403 nbytes
= pa_usec_to_bytes(usec
, &s
->sample_spec
);
2406 pa_sink_input_drop(i
, nbytes
);
2408 pa_log_debug("Requesting rewind due to finished move");
2409 pa_sink_request_rewind(s
, nbytes
);
2412 return o
->process_msg(o
, PA_SINK_MESSAGE_SET_SHARED_VOLUME
, NULL
, 0, NULL
);
2415 case PA_SINK_MESSAGE_SET_SHARED_VOLUME
: {
2416 pa_sink
*root_sink
= s
;
2418 while (root_sink
->flags
& PA_SINK_SHARE_VOLUME_WITH_MASTER
)
2419 root_sink
= root_sink
->input_to_master
->sink
;
2421 set_shared_volume_within_thread(root_sink
);
2425 case PA_SINK_MESSAGE_SET_VOLUME_SYNCED
:
2427 if (s
->flags
& PA_SINK_SYNC_VOLUME
) {
2429 pa_sink_volume_change_push(s
);
2431 /* Fall through ... */
2433 case PA_SINK_MESSAGE_SET_VOLUME
:
2435 if (!pa_cvolume_equal(&s
->thread_info
.soft_volume
, &s
->soft_volume
)) {
2436 s
->thread_info
.soft_volume
= s
->soft_volume
;
2437 pa_sink_request_rewind(s
, (size_t) -1);
2440 /* Fall through ... */
2442 case PA_SINK_MESSAGE_SYNC_VOLUMES
:
2443 sync_input_volumes_within_thread(s
);
2446 case PA_SINK_MESSAGE_GET_VOLUME
:
2448 if ((s
->flags
& PA_SINK_SYNC_VOLUME
) && s
->get_volume
) {
2450 pa_sink_volume_change_flush(s
);
2451 pa_sw_cvolume_divide(&s
->thread_info
.current_hw_volume
, &s
->real_volume
, &s
->soft_volume
);
2454 /* In case sink implementor reset SW volume. */
2455 if (!pa_cvolume_equal(&s
->thread_info
.soft_volume
, &s
->soft_volume
)) {
2456 s
->thread_info
.soft_volume
= s
->soft_volume
;
2457 pa_sink_request_rewind(s
, (size_t) -1);
2462 case PA_SINK_MESSAGE_SET_MUTE
:
2464 if (s
->thread_info
.soft_muted
!= s
->muted
) {
2465 s
->thread_info
.soft_muted
= s
->muted
;
2466 pa_sink_request_rewind(s
, (size_t) -1);
2469 if (s
->flags
& PA_SINK_SYNC_VOLUME
&& s
->set_mute
)
2474 case PA_SINK_MESSAGE_GET_MUTE
:
2476 if (s
->flags
& PA_SINK_SYNC_VOLUME
&& s
->get_mute
)
2481 case PA_SINK_MESSAGE_SET_STATE
: {
2483 pa_bool_t suspend_change
=
2484 (s
->thread_info
.state
== PA_SINK_SUSPENDED
&& PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata
))) ||
2485 (PA_SINK_IS_OPENED(s
->thread_info
.state
) && PA_PTR_TO_UINT(userdata
) == PA_SINK_SUSPENDED
);
2487 s
->thread_info
.state
= PA_PTR_TO_UINT(userdata
);
2489 if (s
->thread_info
.state
== PA_SINK_SUSPENDED
) {
2490 s
->thread_info
.rewind_nbytes
= 0;
2491 s
->thread_info
.rewind_requested
= FALSE
;
2494 if (suspend_change
) {
2498 while ((i
= pa_hashmap_iterate(s
->thread_info
.inputs
, &state
, NULL
)))
2499 if (i
->suspend_within_thread
)
2500 i
->suspend_within_thread(i
, s
->thread_info
.state
== PA_SINK_SUSPENDED
);
2506 case PA_SINK_MESSAGE_DETACH
:
2508 /* Detach all streams */
2509 pa_sink_detach_within_thread(s
);
2512 case PA_SINK_MESSAGE_ATTACH
:
2514 /* Reattach all streams */
2515 pa_sink_attach_within_thread(s
);
2518 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY
: {
2520 pa_usec_t
*usec
= userdata
;
2521 *usec
= pa_sink_get_requested_latency_within_thread(s
);
2523 /* Yes, that's right, the IO thread will see -1 when no
2524 * explicit requested latency is configured, the main
2525 * thread will see max_latency */
2526 if (*usec
== (pa_usec_t
) -1)
2527 *usec
= s
->thread_info
.max_latency
;
2532 case PA_SINK_MESSAGE_SET_LATENCY_RANGE
: {
2533 pa_usec_t
*r
= userdata
;
2535 pa_sink_set_latency_range_within_thread(s
, r
[0], r
[1]);
2540 case PA_SINK_MESSAGE_GET_LATENCY_RANGE
: {
2541 pa_usec_t
*r
= userdata
;
2543 r
[0] = s
->thread_info
.min_latency
;
2544 r
[1] = s
->thread_info
.max_latency
;
2549 case PA_SINK_MESSAGE_GET_FIXED_LATENCY
:
2551 *((pa_usec_t
*) userdata
) = s
->thread_info
.fixed_latency
;
2554 case PA_SINK_MESSAGE_SET_FIXED_LATENCY
:
2556 pa_sink_set_fixed_latency_within_thread(s
, (pa_usec_t
) offset
);
2559 case PA_SINK_MESSAGE_GET_MAX_REWIND
:
2561 *((size_t*) userdata
) = s
->thread_info
.max_rewind
;
2564 case PA_SINK_MESSAGE_GET_MAX_REQUEST
:
2566 *((size_t*) userdata
) = s
->thread_info
.max_request
;
2569 case PA_SINK_MESSAGE_SET_MAX_REWIND
:
2571 pa_sink_set_max_rewind_within_thread(s
, (size_t) offset
);
2574 case PA_SINK_MESSAGE_SET_MAX_REQUEST
:
2576 pa_sink_set_max_request_within_thread(s
, (size_t) offset
);
2579 case PA_SINK_MESSAGE_SET_PORT
:
2581 pa_assert(userdata
);
2583 struct sink_message_set_port
*msg_data
= userdata
;
2584 msg_data
->ret
= s
->set_port(s
, msg_data
->port
);
2588 case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE
:
2589 /* This message is sent from IO-thread and handled in main thread. */
2590 pa_assert_ctl_context();
2592 pa_sink_get_volume(s
, TRUE
);
2593 pa_sink_get_mute(s
, TRUE
);
2596 case PA_SINK_MESSAGE_GET_LATENCY
:
2597 case PA_SINK_MESSAGE_MAX
:
2604 /* Called from main thread */
2605 int pa_sink_suspend_all(pa_core
*c
, pa_bool_t suspend
, pa_suspend_cause_t cause
) {
2610 pa_core_assert_ref(c
);
2611 pa_assert_ctl_context();
2612 pa_assert(cause
!= 0);
2614 PA_IDXSET_FOREACH(sink
, c
->sinks
, idx
) {
2617 if ((r
= pa_sink_suspend(sink
, suspend
, cause
)) < 0)
2624 /* Called from main thread */
2625 void pa_sink_detach(pa_sink
*s
) {
2626 pa_sink_assert_ref(s
);
2627 pa_assert_ctl_context();
2628 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2630 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_DETACH
, NULL
, 0, NULL
) == 0);
2633 /* Called from main thread */
2634 void pa_sink_attach(pa_sink
*s
) {
2635 pa_sink_assert_ref(s
);
2636 pa_assert_ctl_context();
2637 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2639 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_ATTACH
, NULL
, 0, NULL
) == 0);
2642 /* Called from IO thread */
2643 void pa_sink_detach_within_thread(pa_sink
*s
) {
2647 pa_sink_assert_ref(s
);
2648 pa_sink_assert_io_context(s
);
2649 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
2651 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
2655 if (s
->monitor_source
)
2656 pa_source_detach_within_thread(s
->monitor_source
);
2659 /* Called from IO thread */
2660 void pa_sink_attach_within_thread(pa_sink
*s
) {
2664 pa_sink_assert_ref(s
);
2665 pa_sink_assert_io_context(s
);
2666 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
2668 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
2672 if (s
->monitor_source
)
2673 pa_source_attach_within_thread(s
->monitor_source
);
2676 /* Called from IO thread */
2677 void pa_sink_request_rewind(pa_sink
*s
, size_t nbytes
) {
2678 pa_sink_assert_ref(s
);
2679 pa_sink_assert_io_context(s
);
2680 pa_assert(PA_SINK_IS_LINKED(s
->thread_info
.state
));
2682 if (s
->thread_info
.state
== PA_SINK_SUSPENDED
)
2685 if (nbytes
== (size_t) -1)
2686 nbytes
= s
->thread_info
.max_rewind
;
2688 nbytes
= PA_MIN(nbytes
, s
->thread_info
.max_rewind
);
2690 if (s
->thread_info
.rewind_requested
&&
2691 nbytes
<= s
->thread_info
.rewind_nbytes
)
2694 s
->thread_info
.rewind_nbytes
= nbytes
;
2695 s
->thread_info
.rewind_requested
= TRUE
;
2697 if (s
->request_rewind
)
2698 s
->request_rewind(s
);
2701 /* Called from IO thread */
2702 pa_usec_t
pa_sink_get_requested_latency_within_thread(pa_sink
*s
) {
2703 pa_usec_t result
= (pa_usec_t
) -1;
2706 pa_usec_t monitor_latency
;
2708 pa_sink_assert_ref(s
);
2709 pa_sink_assert_io_context(s
);
2711 if (!(s
->flags
& PA_SINK_DYNAMIC_LATENCY
))
2712 return PA_CLAMP(s
->thread_info
.fixed_latency
, s
->thread_info
.min_latency
, s
->thread_info
.max_latency
);
2714 if (s
->thread_info
.requested_latency_valid
)
2715 return s
->thread_info
.requested_latency
;
2717 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
2718 if (i
->thread_info
.requested_sink_latency
!= (pa_usec_t
) -1 &&
2719 (result
== (pa_usec_t
) -1 || result
> i
->thread_info
.requested_sink_latency
))
2720 result
= i
->thread_info
.requested_sink_latency
;
2722 monitor_latency
= pa_source_get_requested_latency_within_thread(s
->monitor_source
);
2724 if (monitor_latency
!= (pa_usec_t
) -1 &&
2725 (result
== (pa_usec_t
) -1 || result
> monitor_latency
))
2726 result
= monitor_latency
;
2728 if (result
!= (pa_usec_t
) -1)
2729 result
= PA_CLAMP(result
, s
->thread_info
.min_latency
, s
->thread_info
.max_latency
);
2731 if (PA_SINK_IS_LINKED(s
->thread_info
.state
)) {
2732 /* Only cache if properly initialized */
2733 s
->thread_info
.requested_latency
= result
;
2734 s
->thread_info
.requested_latency_valid
= TRUE
;
2740 /* Called from main thread */
2741 pa_usec_t
pa_sink_get_requested_latency(pa_sink
*s
) {
2744 pa_sink_assert_ref(s
);
2745 pa_assert_ctl_context();
2746 pa_assert(PA_SINK_IS_LINKED(s
->state
));
2748 if (s
->state
== PA_SINK_SUSPENDED
)
2751 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY
, &usec
, 0, NULL
) == 0);
2756 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2757 void pa_sink_set_max_rewind_within_thread(pa_sink
*s
, size_t max_rewind
) {
2761 pa_sink_assert_ref(s
);
2762 pa_sink_assert_io_context(s
);
2764 if (max_rewind
== s
->thread_info
.max_rewind
)
2767 s
->thread_info
.max_rewind
= max_rewind
;
2769 if (PA_SINK_IS_LINKED(s
->thread_info
.state
))
2770 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
2771 pa_sink_input_update_max_rewind(i
, s
->thread_info
.max_rewind
);
2773 if (s
->monitor_source
)
2774 pa_source_set_max_rewind_within_thread(s
->monitor_source
, s
->thread_info
.max_rewind
);
2777 /* Called from main thread */
2778 void pa_sink_set_max_rewind(pa_sink
*s
, size_t max_rewind
) {
2779 pa_sink_assert_ref(s
);
2780 pa_assert_ctl_context();
2782 if (PA_SINK_IS_LINKED(s
->state
))
2783 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_MAX_REWIND
, NULL
, max_rewind
, NULL
) == 0);
2785 pa_sink_set_max_rewind_within_thread(s
, max_rewind
);
2788 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2789 void pa_sink_set_max_request_within_thread(pa_sink
*s
, size_t max_request
) {
2792 pa_sink_assert_ref(s
);
2793 pa_sink_assert_io_context(s
);
2795 if (max_request
== s
->thread_info
.max_request
)
2798 s
->thread_info
.max_request
= max_request
;
2800 if (PA_SINK_IS_LINKED(s
->thread_info
.state
)) {
2803 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
2804 pa_sink_input_update_max_request(i
, s
->thread_info
.max_request
);
2808 /* Called from main thread */
2809 void pa_sink_set_max_request(pa_sink
*s
, size_t max_request
) {
2810 pa_sink_assert_ref(s
);
2811 pa_assert_ctl_context();
2813 if (PA_SINK_IS_LINKED(s
->state
))
2814 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_MAX_REQUEST
, NULL
, max_request
, NULL
) == 0);
2816 pa_sink_set_max_request_within_thread(s
, max_request
);
2819 /* Called from IO thread */
2820 void pa_sink_invalidate_requested_latency(pa_sink
*s
, pa_bool_t dynamic
) {
2824 pa_sink_assert_ref(s
);
2825 pa_sink_assert_io_context(s
);
2827 if ((s
->flags
& PA_SINK_DYNAMIC_LATENCY
))
2828 s
->thread_info
.requested_latency_valid
= FALSE
;
2832 if (PA_SINK_IS_LINKED(s
->thread_info
.state
)) {
2834 if (s
->update_requested_latency
)
2835 s
->update_requested_latency(s
);
2837 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
2838 if (i
->update_sink_requested_latency
)
2839 i
->update_sink_requested_latency(i
);
2843 /* Called from main thread */
2844 void pa_sink_set_latency_range(pa_sink
*s
, pa_usec_t min_latency
, pa_usec_t max_latency
) {
2845 pa_sink_assert_ref(s
);
2846 pa_assert_ctl_context();
2848 /* min_latency == 0: no limit
2849 * min_latency anything else: specified limit
2851 * Similar for max_latency */
2853 if (min_latency
< ABSOLUTE_MIN_LATENCY
)
2854 min_latency
= ABSOLUTE_MIN_LATENCY
;
2856 if (max_latency
<= 0 ||
2857 max_latency
> ABSOLUTE_MAX_LATENCY
)
2858 max_latency
= ABSOLUTE_MAX_LATENCY
;
2860 pa_assert(min_latency
<= max_latency
);
2862 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2863 pa_assert((min_latency
== ABSOLUTE_MIN_LATENCY
&&
2864 max_latency
== ABSOLUTE_MAX_LATENCY
) ||
2865 (s
->flags
& PA_SINK_DYNAMIC_LATENCY
));
2867 if (PA_SINK_IS_LINKED(s
->state
)) {
2873 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_LATENCY_RANGE
, r
, 0, NULL
) == 0);
2875 pa_sink_set_latency_range_within_thread(s
, min_latency
, max_latency
);
2878 /* Called from main thread */
2879 void pa_sink_get_latency_range(pa_sink
*s
, pa_usec_t
*min_latency
, pa_usec_t
*max_latency
) {
2880 pa_sink_assert_ref(s
);
2881 pa_assert_ctl_context();
2882 pa_assert(min_latency
);
2883 pa_assert(max_latency
);
2885 if (PA_SINK_IS_LINKED(s
->state
)) {
2886 pa_usec_t r
[2] = { 0, 0 };
2888 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_LATENCY_RANGE
, r
, 0, NULL
) == 0);
2890 *min_latency
= r
[0];
2891 *max_latency
= r
[1];
2893 *min_latency
= s
->thread_info
.min_latency
;
2894 *max_latency
= s
->thread_info
.max_latency
;
2898 /* Called from IO thread */
2899 void pa_sink_set_latency_range_within_thread(pa_sink
*s
, pa_usec_t min_latency
, pa_usec_t max_latency
) {
2900 pa_sink_assert_ref(s
);
2901 pa_sink_assert_io_context(s
);
2903 pa_assert(min_latency
>= ABSOLUTE_MIN_LATENCY
);
2904 pa_assert(max_latency
<= ABSOLUTE_MAX_LATENCY
);
2905 pa_assert(min_latency
<= max_latency
);
2907 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2908 pa_assert((min_latency
== ABSOLUTE_MIN_LATENCY
&&
2909 max_latency
== ABSOLUTE_MAX_LATENCY
) ||
2910 (s
->flags
& PA_SINK_DYNAMIC_LATENCY
));
2912 if (s
->thread_info
.min_latency
== min_latency
&&
2913 s
->thread_info
.max_latency
== max_latency
)
2916 s
->thread_info
.min_latency
= min_latency
;
2917 s
->thread_info
.max_latency
= max_latency
;
2919 if (PA_SINK_IS_LINKED(s
->thread_info
.state
)) {
2923 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
2924 if (i
->update_sink_latency_range
)
2925 i
->update_sink_latency_range(i
);
2928 pa_sink_invalidate_requested_latency(s
, FALSE
);
2930 pa_source_set_latency_range_within_thread(s
->monitor_source
, min_latency
, max_latency
);
2933 /* Called from main thread */
2934 void pa_sink_set_fixed_latency(pa_sink
*s
, pa_usec_t latency
) {
2935 pa_sink_assert_ref(s
);
2936 pa_assert_ctl_context();
2938 if (s
->flags
& PA_SINK_DYNAMIC_LATENCY
) {
2939 pa_assert(latency
== 0);
2943 if (latency
< ABSOLUTE_MIN_LATENCY
)
2944 latency
= ABSOLUTE_MIN_LATENCY
;
2946 if (latency
> ABSOLUTE_MAX_LATENCY
)
2947 latency
= ABSOLUTE_MAX_LATENCY
;
2949 if (PA_SINK_IS_LINKED(s
->state
))
2950 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_FIXED_LATENCY
, NULL
, (int64_t) latency
, NULL
) == 0);
2952 s
->thread_info
.fixed_latency
= latency
;
2954 pa_source_set_fixed_latency(s
->monitor_source
, latency
);
2957 /* Called from main thread */
2958 pa_usec_t
pa_sink_get_fixed_latency(pa_sink
*s
) {
2961 pa_sink_assert_ref(s
);
2962 pa_assert_ctl_context();
2964 if (s
->flags
& PA_SINK_DYNAMIC_LATENCY
)
2967 if (PA_SINK_IS_LINKED(s
->state
))
2968 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_FIXED_LATENCY
, &latency
, 0, NULL
) == 0);
2970 latency
= s
->thread_info
.fixed_latency
;
2975 /* Called from IO thread */
2976 void pa_sink_set_fixed_latency_within_thread(pa_sink
*s
, pa_usec_t latency
) {
2977 pa_sink_assert_ref(s
);
2978 pa_sink_assert_io_context(s
);
2980 if (s
->flags
& PA_SINK_DYNAMIC_LATENCY
) {
2981 pa_assert(latency
== 0);
2985 pa_assert(latency
>= ABSOLUTE_MIN_LATENCY
);
2986 pa_assert(latency
<= ABSOLUTE_MAX_LATENCY
);
2988 if (s
->thread_info
.fixed_latency
== latency
)
2991 s
->thread_info
.fixed_latency
= latency
;
2993 if (PA_SINK_IS_LINKED(s
->thread_info
.state
)) {
2997 PA_HASHMAP_FOREACH(i
, s
->thread_info
.inputs
, state
)
2998 if (i
->update_sink_fixed_latency
)
2999 i
->update_sink_fixed_latency(i
);
3002 pa_sink_invalidate_requested_latency(s
, FALSE
);
3004 pa_source_set_fixed_latency_within_thread(s
->monitor_source
, latency
);
3007 /* Called from main context */
3008 size_t pa_sink_get_max_rewind(pa_sink
*s
) {
3010 pa_assert_ctl_context();
3011 pa_sink_assert_ref(s
);
3013 if (!PA_SINK_IS_LINKED(s
->state
))
3014 return s
->thread_info
.max_rewind
;
3016 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_MAX_REWIND
, &r
, 0, NULL
) == 0);
3021 /* Called from main context */
3022 size_t pa_sink_get_max_request(pa_sink
*s
) {
3024 pa_sink_assert_ref(s
);
3025 pa_assert_ctl_context();
3027 if (!PA_SINK_IS_LINKED(s
->state
))
3028 return s
->thread_info
.max_request
;
3030 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_GET_MAX_REQUEST
, &r
, 0, NULL
) == 0);
3035 /* Called from main context */
3036 int pa_sink_set_port(pa_sink
*s
, const char *name
, pa_bool_t save
) {
3037 pa_device_port
*port
;
3040 pa_sink_assert_ref(s
);
3041 pa_assert_ctl_context();
3044 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s
->index
, s
->name
);
3045 return -PA_ERR_NOTIMPLEMENTED
;
3049 return -PA_ERR_NOENTITY
;
3051 if (!(port
= pa_hashmap_get(s
->ports
, name
)))
3052 return -PA_ERR_NOENTITY
;
3054 if (s
->active_port
== port
) {
3055 s
->save_port
= s
->save_port
|| save
;
3059 if (s
->flags
& PA_SINK_SYNC_VOLUME
) {
3060 struct sink_message_set_port msg
= { .port
= port
, .ret
= 0 };
3061 pa_assert_se(pa_asyncmsgq_send(s
->asyncmsgq
, PA_MSGOBJECT(s
), PA_SINK_MESSAGE_SET_PORT
, &msg
, 0, NULL
) == 0);
3065 ret
= s
->set_port(s
, port
);
3068 return -PA_ERR_NOENTITY
;
3070 pa_subscription_post(s
->core
, PA_SUBSCRIPTION_EVENT_SINK
|PA_SUBSCRIPTION_EVENT_CHANGE
, s
->index
);
3072 pa_log_info("Changed port of sink %u \"%s\" to %s", s
->index
, s
->name
, port
->name
);
3074 s
->active_port
= port
;
3075 s
->save_port
= save
;
3077 pa_hook_fire(&s
->core
->hooks
[PA_CORE_HOOK_SINK_PORT_CHANGED
], s
);
3082 pa_bool_t
pa_device_init_icon(pa_proplist
*p
, pa_bool_t is_sink
) {
3083 const char *ff
, *c
, *t
= NULL
, *s
= "", *profile
, *bus
;
3087 if (pa_proplist_contains(p
, PA_PROP_DEVICE_ICON_NAME
))
3090 if ((ff
= pa_proplist_gets(p
, PA_PROP_DEVICE_FORM_FACTOR
))) {
3092 if (pa_streq(ff
, "microphone"))
3093 t
= "audio-input-microphone";
3094 else if (pa_streq(ff
, "webcam"))
3096 else if (pa_streq(ff
, "computer"))
3098 else if (pa_streq(ff
, "handset"))
3100 else if (pa_streq(ff
, "portable"))
3101 t
= "multimedia-player";
3102 else if (pa_streq(ff
, "tv"))
3103 t
= "video-display";
3106 * The following icons are not part of the icon naming spec,
3107 * because Rodney Dawes sucks as the maintainer of that spec.
3109 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
3111 else if (pa_streq(ff
, "headset"))
3112 t
= "audio-headset";
3113 else if (pa_streq(ff
, "headphone"))
3114 t
= "audio-headphones";
3115 else if (pa_streq(ff
, "speaker"))
3116 t
= "audio-speakers";
3117 else if (pa_streq(ff
, "hands-free"))
3118 t
= "audio-handsfree";
3122 if ((c
= pa_proplist_gets(p
, PA_PROP_DEVICE_CLASS
)))
3123 if (pa_streq(c
, "modem"))
3130 t
= "audio-input-microphone";
3133 if ((profile
= pa_proplist_gets(p
, PA_PROP_DEVICE_PROFILE_NAME
))) {
3134 if (strstr(profile
, "analog"))
3136 else if (strstr(profile
, "iec958"))
3138 else if (strstr(profile
, "hdmi"))
3142 bus
= pa_proplist_gets(p
, PA_PROP_DEVICE_BUS
);
3144 pa_proplist_setf(p
, PA_PROP_DEVICE_ICON_NAME
, "%s%s%s%s", t
, pa_strempty(s
), bus
? "-" : "", pa_strempty(bus
));
3149 pa_bool_t
pa_device_init_description(pa_proplist
*p
) {
3150 const char *s
, *d
= NULL
, *k
;
3153 if (pa_proplist_contains(p
, PA_PROP_DEVICE_DESCRIPTION
))
3156 if ((s
= pa_proplist_gets(p
, PA_PROP_DEVICE_FORM_FACTOR
)))
3157 if (pa_streq(s
, "internal"))
3158 d
= _("Internal Audio");
3161 if ((s
= pa_proplist_gets(p
, PA_PROP_DEVICE_CLASS
)))
3162 if (pa_streq(s
, "modem"))
3166 d
= pa_proplist_gets(p
, PA_PROP_DEVICE_PRODUCT_NAME
);
3171 k
= pa_proplist_gets(p
, PA_PROP_DEVICE_PROFILE_DESCRIPTION
);
3174 pa_proplist_setf(p
, PA_PROP_DEVICE_DESCRIPTION
, _("%s %s"), d
, k
);
3176 pa_proplist_sets(p
, PA_PROP_DEVICE_DESCRIPTION
, d
);
3181 pa_bool_t
pa_device_init_intended_roles(pa_proplist
*p
) {
3185 if (pa_proplist_contains(p
, PA_PROP_DEVICE_INTENDED_ROLES
))
3188 if ((s
= pa_proplist_gets(p
, PA_PROP_DEVICE_FORM_FACTOR
)))
3189 if (pa_streq(s
, "handset") || pa_streq(s
, "hands-free")
3190 || pa_streq(s
, "headset")) {
3191 pa_proplist_sets(p
, PA_PROP_DEVICE_INTENDED_ROLES
, "phone");
3198 unsigned pa_device_init_priority(pa_proplist
*p
) {
3200 unsigned priority
= 0;
3204 if ((s
= pa_proplist_gets(p
, PA_PROP_DEVICE_CLASS
))) {
3206 if (pa_streq(s
, "sound"))
3208 else if (!pa_streq(s
, "modem"))
3212 if ((s
= pa_proplist_gets(p
, PA_PROP_DEVICE_FORM_FACTOR
))) {
3214 if (pa_streq(s
, "internal"))
3216 else if (pa_streq(s
, "speaker"))
3218 else if (pa_streq(s
, "headphone"))
3222 if ((s
= pa_proplist_gets(p
, PA_PROP_DEVICE_BUS
))) {
3224 if (pa_streq(s
, "pci"))
3226 else if (pa_streq(s
, "usb"))
3228 else if (pa_streq(s
, "bluetooth"))
3232 if ((s
= pa_proplist_gets(p
, PA_PROP_DEVICE_PROFILE_NAME
))) {
3234 if (pa_startswith(s
, "analog-"))
3236 else if (pa_startswith(s
, "iec958-"))
/* Lock-free free list for recycling pa_sink_volume_change structs; entries
 * left on the list at shutdown are released with pa_xfree. */
PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
3245 /* Called from the IO thread. */
3246 static pa_sink_volume_change
*pa_sink_volume_change_new(pa_sink
*s
) {
3247 pa_sink_volume_change
*c
;
3248 if (!(c
= pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change
))))
3249 c
= pa_xnew(pa_sink_volume_change
, 1);
3251 PA_LLIST_INIT(pa_sink_volume_change
, c
);
3253 pa_cvolume_reset(&c
->hw_volume
, s
->sample_spec
.channels
);
3257 /* Called from the IO thread. */
3258 static void pa_sink_volume_change_free(pa_sink_volume_change
*c
) {
3260 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change
), c
) < 0)
3264 /* Called from the IO thread. */
3265 void pa_sink_volume_change_push(pa_sink
*s
) {
3266 pa_sink_volume_change
*c
= NULL
;
3267 pa_sink_volume_change
*nc
= NULL
;
3268 uint32_t safety_margin
= s
->thread_info
.volume_change_safety_margin
;
3270 const char *direction
= NULL
;
3273 nc
= pa_sink_volume_change_new(s
);
3275 /* NOTE: There is already more different volumes in pa_sink that I can remember.
3276 * Adding one more volume for HW would get us rid of this, but I am trying
3277 * to survive with the ones we already have. */
3278 pa_sw_cvolume_divide(&nc
->hw_volume
, &s
->real_volume
, &s
->soft_volume
);
3280 if (!s
->thread_info
.volume_changes
&& pa_cvolume_equal(&nc
->hw_volume
, &s
->thread_info
.current_hw_volume
)) {
3281 pa_log_debug("Volume not changing");
3282 pa_sink_volume_change_free(nc
);
3286 nc
->at
= pa_sink_get_latency_within_thread(s
);
3287 nc
->at
+= pa_rtclock_now() + s
->thread_info
.volume_change_extra_delay
;
3289 if (s
->thread_info
.volume_changes_tail
) {
3290 for (c
= s
->thread_info
.volume_changes_tail
; c
; c
= c
->prev
) {
3291 /* If volume is going up let's do it a bit late. If it is going
3292 * down let's do it a bit early. */
3293 if (pa_cvolume_avg(&nc
->hw_volume
) > pa_cvolume_avg(&c
->hw_volume
)) {
3294 if (nc
->at
+ safety_margin
> c
->at
) {
3295 nc
->at
+= safety_margin
;
3300 else if (nc
->at
- safety_margin
> c
->at
) {
3301 nc
->at
-= safety_margin
;
3309 if (pa_cvolume_avg(&nc
->hw_volume
) > pa_cvolume_avg(&s
->thread_info
.current_hw_volume
)) {
3310 nc
->at
+= safety_margin
;
3313 nc
->at
-= safety_margin
;
3316 PA_LLIST_PREPEND(pa_sink_volume_change
, s
->thread_info
.volume_changes
, nc
);
3319 PA_LLIST_INSERT_AFTER(pa_sink_volume_change
, s
->thread_info
.volume_changes
, c
, nc
);
3322 pa_log_debug("Volume going %s to %d at %llu", direction
, pa_cvolume_avg(&nc
->hw_volume
), (long long unsigned) nc
->at
);
3324 /* We can ignore volume events that came earlier but should happen later than this. */
3325 PA_LLIST_FOREACH(c
, nc
->next
) {
3326 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c
->hw_volume
), (long long unsigned) c
->at
);
3327 pa_sink_volume_change_free(c
);
3330 s
->thread_info
.volume_changes_tail
= nc
;
3333 /* Called from the IO thread. */
3334 static void pa_sink_volume_change_flush(pa_sink
*s
) {
3335 pa_sink_volume_change
*c
= s
->thread_info
.volume_changes
;
3337 s
->thread_info
.volume_changes
= NULL
;
3338 s
->thread_info
.volume_changes_tail
= NULL
;
3340 pa_sink_volume_change
*next
= c
->next
;
3341 pa_sink_volume_change_free(c
);
3346 /* Called from the IO thread. */
3347 pa_bool_t
pa_sink_volume_change_apply(pa_sink
*s
, pa_usec_t
*usec_to_next
) {
3348 pa_usec_t now
= pa_rtclock_now();
3349 pa_bool_t ret
= FALSE
;
3352 pa_assert(s
->write_volume
);
3354 while (s
->thread_info
.volume_changes
&& now
>= s
->thread_info
.volume_changes
->at
) {
3355 pa_sink_volume_change
*c
= s
->thread_info
.volume_changes
;
3356 PA_LLIST_REMOVE(pa_sink_volume_change
, s
->thread_info
.volume_changes
, c
);
3357 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
3358 pa_cvolume_avg(&c
->hw_volume
), (long long unsigned) c
->at
, (long long unsigned) (now
- c
->at
));
3360 s
->thread_info
.current_hw_volume
= c
->hw_volume
;
3361 pa_sink_volume_change_free(c
);
3364 if (s
->write_volume
&& ret
)
3367 if (s
->thread_info
.volume_changes
) {
3369 *usec_to_next
= s
->thread_info
.volume_changes
->at
- now
;
3370 if (pa_log_ratelimit(PA_LOG_DEBUG
))
3371 pa_log_debug("Next volume change in %lld usec", (long long) (s
->thread_info
.volume_changes
->at
- now
));
3376 s
->thread_info
.volume_changes_tail
= NULL
;
3381 /* Called from the IO thread. */
3382 static void pa_sink_volume_change_rewind(pa_sink
*s
, size_t nbytes
) {
3383 /* All the queued volume events later than current latency are shifted to happen earlier. */
3384 pa_sink_volume_change
*c
;
3385 pa_volume_t prev_vol
= pa_cvolume_avg(&s
->thread_info
.current_hw_volume
);
3386 pa_usec_t rewound
= pa_bytes_to_usec(nbytes
, &s
->sample_spec
);
3387 pa_usec_t limit
= pa_sink_get_latency_within_thread(s
);
3389 pa_log_debug("latency = %lld", (long long) limit
);
3390 limit
+= pa_rtclock_now() + s
->thread_info
.volume_change_extra_delay
;
3392 PA_LLIST_FOREACH(c
, s
->thread_info
.volume_changes
) {
3393 pa_usec_t modified_limit
= limit
;
3394 if (prev_vol
> pa_cvolume_avg(&c
->hw_volume
))
3395 modified_limit
-= s
->thread_info
.volume_change_safety_margin
;
3397 modified_limit
+= s
->thread_info
.volume_change_safety_margin
;
3398 if (c
->at
> modified_limit
) {
3400 if (c
->at
< modified_limit
)
3401 c
->at
= modified_limit
;
3403 prev_vol
= pa_cvolume_avg(&c
->hw_volume
);
3405 pa_sink_volume_change_apply(s
, NULL
);
3408 /* Called from the main thread */
3409 /* Gets the list of formats supported by the sink. The members and idxset must
3410 * be freed by the caller. */
3411 pa_idxset
* pa_sink_get_formats(pa_sink
*s
) {
3416 if (s
->get_formats
) {
3417 /* Sink supports format query, all is good */
3418 ret
= s
->get_formats(s
);
3420 /* Sink doesn't support format query, so assume it does PCM */
3421 pa_format_info
*f
= pa_format_info_new();
3422 f
->encoding
= PA_ENCODING_PCM
;
3424 ret
= pa_idxset_new(NULL
, NULL
);
3425 pa_idxset_put(ret
, f
, NULL
);
3431 /* Called from the main thread */
3432 /* Checks if the sink can accept this format */
3433 pa_bool_t
pa_sink_check_format(pa_sink
*s
, pa_format_info
*f
)
3435 pa_idxset
*formats
= NULL
;
3436 pa_bool_t ret
= FALSE
;
3441 formats
= pa_sink_get_formats(s
);
3444 pa_format_info
*finfo_device
;
3447 PA_IDXSET_FOREACH(finfo_device
, formats
, i
) {
3448 if (pa_format_info_is_compatible(finfo_device
, f
)) {
3454 pa_idxset_free(formats
, (pa_free2_cb_t
) pa_format_info_free2
, NULL
);
3460 /* Called from the main thread */
3461 /* Calculates the intersection between formats supported by the sink and
3462 * in_formats, and returns these, in the order of the sink's formats. */
3463 pa_idxset
* pa_sink_check_formats(pa_sink
*s
, pa_idxset
*in_formats
) {
3464 pa_idxset
*out_formats
= pa_idxset_new(NULL
, NULL
), *sink_formats
= NULL
;
3465 pa_format_info
*f_sink
, *f_in
;
3470 if (!in_formats
|| pa_idxset_isempty(in_formats
))
3473 sink_formats
= pa_sink_get_formats(s
);
3475 PA_IDXSET_FOREACH(f_sink
, sink_formats
, i
) {
3476 PA_IDXSET_FOREACH(f_in
, in_formats
, j
) {
3477 if (pa_format_info_is_compatible(f_sink
, f_in
))
3478 pa_idxset_put(out_formats
, pa_format_info_copy(f_in
), NULL
);
3484 pa_idxset_free(sink_formats
, (pa_free2_cb_t
) pa_format_info_free2
, NULL
);