pulseaudio / src / pulsecore / source.c (blob at commit c0bc1c996ef317043872e5d41d35318ae4467884)
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29
30 #include <pulse/format.h>
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
37
38 #include <pulsecore/core-util.h>
39 #include <pulsecore/source-output.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-subscribe.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/mix.h>
44 #include <pulsecore/flist.h>
45
46 #include "source.h"
47
48 #define ABSOLUTE_MIN_LATENCY (500)
49 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
50 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
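/* Editor's note: all three bounds are in microseconds (pa_usec_t), i.e. a
 * 0.5 ms minimum, a 10 s maximum, and a 250 ms default when the latency is
 * fixed rather than dynamic. */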
51
52 PA_DEFINE_PUBLIC_CLASS(pa_source, pa_msgobject);
53
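/* One entry in the deferred ("write through the IO thread") hardware volume
 * queue: the volume to apply and the time at which to apply it (see
 * pa_source_volume_change_push()/_flush() below). */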
54 struct pa_source_volume_change {
55 pa_usec_t at;
56 pa_cvolume hw_volume;
57
58 PA_LLIST_FIELDS(pa_source_volume_change);
59 };
60
61 struct source_message_set_port {
62 pa_device_port *port;
63 int ret;
64 };
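/* Payload for PA_SOURCE_MESSAGE_SET_PORT: the port to activate plus a slot
 * for the message handler's return value (used from pa_source_suspend()
 * when deferred volume is enabled). */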
65
66 static void source_free(pa_object *o);
67
68 static void pa_source_volume_change_push(pa_source *s);
69 static void pa_source_volume_change_flush(pa_source *s);
70
71 pa_source_new_data* pa_source_new_data_init(pa_source_new_data *data) {
72 pa_assert(data);
73
74 pa_zero(*data);
75 data->proplist = pa_proplist_new();
76 data->ports = pa_hashmap_new_full(pa_idxset_string_hash_func, pa_idxset_string_compare_func, NULL, (pa_free_cb_t) pa_device_port_unref);
77
78 return data;
79 }
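/* The usual lifecycle: pa_source_new_data_init(), then the
 * pa_source_new_data_set_*() helpers below, pa_source_new(), and finally
 * pa_source_new_data_done() to release the temporary data. */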
80
81 void pa_source_new_data_set_name(pa_source_new_data *data, const char *name) {
82 pa_assert(data);
83
84 pa_xfree(data->name);
85 data->name = pa_xstrdup(name);
86 }
87
88 void pa_source_new_data_set_sample_spec(pa_source_new_data *data, const pa_sample_spec *spec) {
89 pa_assert(data);
90
91 if ((data->sample_spec_is_set = !!spec))
92 data->sample_spec = *spec;
93 }
94
95 void pa_source_new_data_set_channel_map(pa_source_new_data *data, const pa_channel_map *map) {
96 pa_assert(data);
97
98 if ((data->channel_map_is_set = !!map))
99 data->channel_map = *map;
100 }
101
102 void pa_source_new_data_set_alternate_sample_rate(pa_source_new_data *data, const uint32_t alternate_sample_rate) {
103 pa_assert(data);
104
105 data->alternate_sample_rate_is_set = true;
106 data->alternate_sample_rate = alternate_sample_rate;
107 }
108
109 void pa_source_new_data_set_volume(pa_source_new_data *data, const pa_cvolume *volume) {
110 pa_assert(data);
111
112 if ((data->volume_is_set = !!volume))
113 data->volume = *volume;
114 }
115
116 void pa_source_new_data_set_muted(pa_source_new_data *data, bool mute) {
117 pa_assert(data);
118
119 data->muted_is_set = true;
120 data->muted = !!mute;
121 }
122
123 void pa_source_new_data_set_port(pa_source_new_data *data, const char *port) {
124 pa_assert(data);
125
126 pa_xfree(data->active_port);
127 data->active_port = pa_xstrdup(port);
128 }
129
130 void pa_source_new_data_done(pa_source_new_data *data) {
131 pa_assert(data);
132
133 pa_proplist_free(data->proplist);
134
135 if (data->ports)
136 pa_hashmap_free(data->ports);
137
138 pa_xfree(data->name);
139 pa_xfree(data->active_port);
140 }
141
142 /* Called from main context */
143 static void reset_callbacks(pa_source *s) {
144 pa_assert(s);
145
146 s->set_state = NULL;
147 s->get_volume = NULL;
148 s->set_volume = NULL;
149 s->write_volume = NULL;
150 s->get_mute = NULL;
151 s->set_mute = NULL;
152 s->update_requested_latency = NULL;
153 s->set_port = NULL;
154 s->get_formats = NULL;
155 s->update_rate = NULL;
156 }
157
158 /* Called from main context */
159 pa_source* pa_source_new(
160 pa_core *core,
161 pa_source_new_data *data,
162 pa_source_flags_t flags) {
163
164 pa_source *s;
165 const char *name;
166 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
167 char *pt;
168
169 pa_assert(core);
170 pa_assert(data);
171 pa_assert(data->name);
172 pa_assert_ctl_context();
173
174 s = pa_msgobject_new(pa_source);
175
176 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SOURCE, s, data->namereg_fail))) {
177 pa_log_debug("Failed to register name %s.", data->name);
178 pa_xfree(s);
179 return NULL;
180 }
181
182 pa_source_new_data_set_name(data, name);
183
184 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_NEW], data) < 0) {
185 pa_xfree(s);
186 pa_namereg_unregister(core, name);
187 return NULL;
188 }
189
190 /* FIXME, need to free s here on failure */
191
192 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
193 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
194
195 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
196
197 if (!data->channel_map_is_set)
198 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
199
200 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
201 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
202
203 /* FIXME: There should probably be a general function for checking whether
204 * the source volume is allowed to be set, like there is for source outputs. */
205 pa_assert(!data->volume_is_set || !(flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
206
207 if (!data->volume_is_set) {
208 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
209 data->save_volume = false;
210 }
211
212 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
213 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
214
215 if (!data->muted_is_set)
216 data->muted = false;
217
218 if (data->card)
219 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
220
221 pa_device_init_description(data->proplist, data->card);
222 pa_device_init_icon(data->proplist, false);
223 pa_device_init_intended_roles(data->proplist);
224
225 if (!data->active_port) {
226 pa_device_port *p = pa_device_port_find_best(data->ports);
227 if (p)
228 pa_source_new_data_set_port(data, p->name);
229 }
230
231 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_FIXATE], data) < 0) {
232 pa_xfree(s);
233 pa_namereg_unregister(core, name);
234 return NULL;
235 }
236
237 s->parent.parent.free = source_free;
238 s->parent.process_msg = pa_source_process_msg;
239
240 s->core = core;
241 s->state = PA_SOURCE_INIT;
242 s->flags = flags;
243 s->priority = 0;
244 s->suspend_cause = data->suspend_cause;
245 pa_source_set_mixer_dirty(s, false);
246 s->name = pa_xstrdup(name);
247 s->proplist = pa_proplist_copy(data->proplist);
248 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
249 s->module = data->module;
250 s->card = data->card;
251
252 s->priority = pa_device_init_priority(s->proplist);
253
254 s->sample_spec = data->sample_spec;
255 s->channel_map = data->channel_map;
256 s->default_sample_rate = s->sample_spec.rate;
257
258 if (data->alternate_sample_rate_is_set)
259 s->alternate_sample_rate = data->alternate_sample_rate;
260 else
261 s->alternate_sample_rate = s->core->alternate_sample_rate;
262
263 if (s->sample_spec.rate == s->alternate_sample_rate) {
264 pa_log_warn("Default and alternate sample rates are the same.");
265 s->alternate_sample_rate = 0;
266 }
267
268 s->outputs = pa_idxset_new(NULL, NULL);
269 s->n_corked = 0;
270 s->monitor_of = NULL;
271 s->output_from_master = NULL;
272
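    /* A quick map of the volume fields: reference_volume is the volume
     * exposed to clients, real_volume is what we ask the hardware to apply,
     * and soft_volume is the software attenuation that covers whatever the
     * hardware volume cannot. */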
273 s->reference_volume = s->real_volume = data->volume;
274 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
275 s->base_volume = PA_VOLUME_NORM;
276 s->n_volume_steps = PA_VOLUME_NORM+1;
277 s->muted = data->muted;
278 s->refresh_volume = s->refresh_muted = false;
279
280 reset_callbacks(s);
281 s->userdata = NULL;
282
283 s->asyncmsgq = NULL;
284
285 /* As a minor optimization we just steal the ports hashmap instead of
286 * copying it here */
287 s->ports = data->ports;
288 data->ports = NULL;
289
290 s->active_port = NULL;
291 s->save_port = false;
292
293 if (data->active_port)
294 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
295 s->save_port = data->save_port;
296
297 /* Hopefully the active port has already been assigned in the previous call
298 to pa_device_port_find_best, but better safe than sorry */
299 if (!s->active_port)
300 s->active_port = pa_device_port_find_best(s->ports);
301
302 if (s->active_port)
303 s->latency_offset = s->active_port->latency_offset;
304 else
305 s->latency_offset = 0;
306
307 s->save_volume = data->save_volume;
308 s->save_muted = data->save_muted;
309
310 pa_silence_memchunk_get(
311 &core->silence_cache,
312 core->mempool,
313 &s->silence,
314 &s->sample_spec,
315 0);
316
317 s->thread_info.rtpoll = NULL;
318 s->thread_info.outputs = pa_hashmap_new_full(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func, NULL,
319 (pa_free_cb_t) pa_source_output_unref);
320 s->thread_info.soft_volume = s->soft_volume;
321 s->thread_info.soft_muted = s->muted;
322 s->thread_info.state = s->state;
323 s->thread_info.max_rewind = 0;
324 s->thread_info.requested_latency_valid = false;
325 s->thread_info.requested_latency = 0;
326 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
327 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
328 s->thread_info.fixed_latency = flags & PA_SOURCE_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
329
330 PA_LLIST_HEAD_INIT(pa_source_volume_change, s->thread_info.volume_changes);
331 s->thread_info.volume_changes_tail = NULL;
332 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
333 s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
334 s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
335 s->thread_info.latency_offset = s->latency_offset;
336
337 /* FIXME: This should probably be moved to pa_source_put() */
338 pa_assert_se(pa_idxset_put(core->sources, s, &s->index) >= 0);
339
340 if (s->card)
341 pa_assert_se(pa_idxset_put(s->card->sources, s, NULL) >= 0);
342
343 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
344 pa_log_info("Created source %u \"%s\" with sample spec %s and channel map %s\n %s",
345 s->index,
346 s->name,
347 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
348 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
349 pt);
350 pa_xfree(pt);
351
352 return s;
353 }
354
355 /* Called from main context */
356 static int source_set_state(pa_source *s, pa_source_state_t state) {
357 int ret;
358 bool suspend_change;
359 pa_source_state_t original_state;
360
361 pa_assert(s);
362 pa_assert_ctl_context();
363
364 if (s->state == state)
365 return 0;
366
367 original_state = s->state;
368
369 suspend_change =
370 (original_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(state)) ||
371 (PA_SOURCE_IS_OPENED(original_state) && state == PA_SOURCE_SUSPENDED);
372
373 if (s->set_state)
374 if ((ret = s->set_state(s, state)) < 0)
375 return ret;
376
377 if (s->asyncmsgq)
378 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
379
380 if (s->set_state)
381 s->set_state(s, original_state);
382
383 return ret;
384 }
385
386 s->state = state;
387
388 if (state != PA_SOURCE_UNLINKED) { /* if we enter UNLINKED state pa_source_unlink() will fire the appropriate events */
389 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_STATE_CHANGED], s);
390 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
391 }
392
393 if (suspend_change) {
394 pa_source_output *o;
395 uint32_t idx;
396
397 /* We're suspending or resuming, tell everyone about it */
398
399 PA_IDXSET_FOREACH(o, s->outputs, idx)
400 if (s->state == PA_SOURCE_SUSPENDED &&
401 (o->flags & PA_SOURCE_OUTPUT_KILL_ON_SUSPEND))
402 pa_source_output_kill(o);
403 else if (o->suspend)
404 o->suspend(o, state == PA_SOURCE_SUSPENDED);
405 }
406
407 return 0;
408 }
409
410 void pa_source_set_get_volume_callback(pa_source *s, pa_source_cb_t cb) {
411 pa_assert(s);
412
413 s->get_volume = cb;
414 }
415
416 void pa_source_set_set_volume_callback(pa_source *s, pa_source_cb_t cb) {
417 pa_source_flags_t flags;
418
419 pa_assert(s);
420 pa_assert(!s->write_volume || cb);
421
422 s->set_volume = cb;
423
424 /* Save the current flags so we can tell if they've changed */
425 flags = s->flags;
426
427 if (cb) {
428 /* The source implementor is responsible for setting decibel volume support */
429 s->flags |= PA_SOURCE_HW_VOLUME_CTRL;
430 } else {
431 s->flags &= ~PA_SOURCE_HW_VOLUME_CTRL;
432 /* See note below in pa_source_put() about volume sharing and decibel volumes */
433 pa_source_enable_decibel_volume(s, !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
434 }
435
436 /* If the flags have changed after init, let any clients know via a change event */
437 if (s->state != PA_SOURCE_INIT && flags != s->flags)
438 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
439 }
440
441 void pa_source_set_write_volume_callback(pa_source *s, pa_source_cb_t cb) {
442 pa_source_flags_t flags;
443
444 pa_assert(s);
445 pa_assert(!cb || s->set_volume);
446
447 s->write_volume = cb;
448
449 /* Save the current flags so we can tell if they've changed */
450 flags = s->flags;
451
452 if (cb)
453 s->flags |= PA_SOURCE_DEFERRED_VOLUME;
454 else
455 s->flags &= ~PA_SOURCE_DEFERRED_VOLUME;
456
457 /* If the flags have changed after init, let any clients know via a change event */
458 if (s->state != PA_SOURCE_INIT && flags != s->flags)
459 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
460 }
461
462 void pa_source_set_get_mute_callback(pa_source *s, pa_source_cb_t cb) {
463 pa_assert(s);
464
465 s->get_mute = cb;
466 }
467
468 void pa_source_set_set_mute_callback(pa_source *s, pa_source_cb_t cb) {
469 pa_source_flags_t flags;
470
471 pa_assert(s);
472
473 s->set_mute = cb;
474
475 /* Save the current flags so we can tell if they've changed */
476 flags = s->flags;
477
478 if (cb)
479 s->flags |= PA_SOURCE_HW_MUTE_CTRL;
480 else
481 s->flags &= ~PA_SOURCE_HW_MUTE_CTRL;
482
483 /* If the flags have changed after init, let any clients know via a change event */
484 if (s->state != PA_SOURCE_INIT && flags != s->flags)
485 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
486 }
487
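/* With flat volumes the device (source) volume follows the loudest connected
 * stream volume; per-stream differences are then realized in software via the
 * real ratios computed in compute_real_ratios() below. */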
488 static void enable_flat_volume(pa_source *s, bool enable) {
489 pa_source_flags_t flags;
490
491 pa_assert(s);
492
493 /* Always follow the overall user preference here */
494 enable = enable && s->core->flat_volumes;
495
496 /* Save the current flags so we can tell if they've changed */
497 flags = s->flags;
498
499 if (enable)
500 s->flags |= PA_SOURCE_FLAT_VOLUME;
501 else
502 s->flags &= ~PA_SOURCE_FLAT_VOLUME;
503
504 /* If the flags have changed after init, let any clients know via a change event */
505 if (s->state != PA_SOURCE_INIT && flags != s->flags)
506 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
507 }
508
509 void pa_source_enable_decibel_volume(pa_source *s, bool enable) {
510 pa_source_flags_t flags;
511
512 pa_assert(s);
513
514 /* Save the current flags so we can tell if they've changed */
515 flags = s->flags;
516
517 if (enable) {
518 s->flags |= PA_SOURCE_DECIBEL_VOLUME;
519 enable_flat_volume(s, true);
520 } else {
521 s->flags &= ~PA_SOURCE_DECIBEL_VOLUME;
522 enable_flat_volume(s, false);
523 }
524
525 /* If the flags have changed after init, let any clients know via a change event */
526 if (s->state != PA_SOURCE_INIT && flags != s->flags)
527 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
528 }
529
530 /* Called from main context */
531 void pa_source_put(pa_source *s) {
532 pa_source_assert_ref(s);
533 pa_assert_ctl_context();
534
535 pa_assert(s->state == PA_SOURCE_INIT);
536 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || s->output_from_master);
537
538 /* The following fields must be initialized properly when calling _put() */
539 pa_assert(s->asyncmsgq);
540 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
541
542 /* Generally, flags should be initialized via pa_source_new(). As a
543 * special exception we allow some volume related flags to be set
544 * between _new() and _put() by the callback setter functions above.
545 *
546 * Thus we implement a couple of safeguards here which ensure the above
547 * setters were used (or at least the implementor made manual changes
548 * in a compatible way).
549 *
550 * Note: All of the flags set here can change over the lifetime
551 * of the source. */
552 pa_assert(!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) || s->set_volume);
553 pa_assert(!(s->flags & PA_SOURCE_DEFERRED_VOLUME) || s->write_volume);
554 pa_assert(!(s->flags & PA_SOURCE_HW_MUTE_CTRL) || s->set_mute);
555
556 /* XXX: Currently decibel volume is disabled for all sources that use volume
557 * sharing. When the master source supports decibel volume, it would be good
558 * to have the flag also in the filter source, but currently we don't do that
559 * so that the flags of the filter source never change when it's moved from
560 * a master source to another. One solution for this problem would be to
561 * remove user-visible volume altogether from filter sources when volume
562 * sharing is used, but the current approach was easier to implement... */
563 /* We always support decibel volumes in software, otherwise we leave it to
564 * the source implementor to set this flag as needed.
565 *
566 * Note: This flag can also change over the life time of the source. */
567 if (!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
568 pa_source_enable_decibel_volume(s, true);
569
570 /* If the source implementor supports dB volumes by itself, we should always
571 * try to enable flat volumes too */
572 if ((s->flags & PA_SOURCE_DECIBEL_VOLUME))
573 enable_flat_volume(s, true);
574
575 if (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) {
576 pa_source *root_source = pa_source_get_master(s);
577
578 pa_assert(PA_LIKELY(root_source));
579
580 s->reference_volume = root_source->reference_volume;
581 pa_cvolume_remap(&s->reference_volume, &root_source->channel_map, &s->channel_map);
582
583 s->real_volume = root_source->real_volume;
584 pa_cvolume_remap(&s->real_volume, &root_source->channel_map, &s->channel_map);
585 } else
586 /* We assume that if the source implementor changed the default
587 * volume they did so in real_volume, because that is the usual
588 * place where such changes are supposed to be made. */
589 s->reference_volume = s->real_volume;
590
591 s->thread_info.soft_volume = s->soft_volume;
592 s->thread_info.soft_muted = s->muted;
593 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
594
595 pa_assert((s->flags & PA_SOURCE_HW_VOLUME_CTRL)
596 || (s->base_volume == PA_VOLUME_NORM
597 && ((s->flags & PA_SOURCE_DECIBEL_VOLUME || (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)))));
598 pa_assert(!(s->flags & PA_SOURCE_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
599 pa_assert(!(s->flags & PA_SOURCE_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
600
601 if (s->suspend_cause)
602 pa_assert_se(source_set_state(s, PA_SOURCE_SUSPENDED) == 0);
603 else
604 pa_assert_se(source_set_state(s, PA_SOURCE_IDLE) == 0);
605
606 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_NEW, s->index);
607 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PUT], s);
608 }
609
610 /* Called from main context */
611 void pa_source_unlink(pa_source *s) {
612 bool linked;
613 pa_source_output *o, *j = NULL;
614
615 pa_assert(s);
616 pa_assert_ctl_context();
617
618 /* See pa_sink_unlink() for a couple of comments on how this function
619 * works. */
620
621 linked = PA_SOURCE_IS_LINKED(s->state);
622
623 if (linked)
624 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], s);
625
626 if (s->state != PA_SOURCE_UNLINKED)
627 pa_namereg_unregister(s->core, s->name);
628 pa_idxset_remove_by_data(s->core->sources, s, NULL);
629
630 if (s->card)
631 pa_idxset_remove_by_data(s->card->sources, s, NULL);
632
633 while ((o = pa_idxset_first(s->outputs, NULL))) {
634 pa_assert(o != j);
635 pa_source_output_kill(o);
636 j = o;
637 }
638
639 if (linked)
640 source_set_state(s, PA_SOURCE_UNLINKED);
641 else
642 s->state = PA_SOURCE_UNLINKED;
643
644 reset_callbacks(s);
645
646 if (linked) {
647 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
648 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK_POST], s);
649 }
650 }
651
652 /* Called from main context */
653 static void source_free(pa_object *o) {
654 pa_source *s = PA_SOURCE(o);
655
656 pa_assert(s);
657 pa_assert_ctl_context();
658 pa_assert(pa_source_refcnt(s) == 0);
659
660 if (PA_SOURCE_IS_LINKED(s->state))
661 pa_source_unlink(s);
662
663 pa_log_info("Freeing source %u \"%s\"", s->index, s->name);
664
665 pa_idxset_free(s->outputs, NULL);
666 pa_hashmap_free(s->thread_info.outputs);
667
668 if (s->silence.memblock)
669 pa_memblock_unref(s->silence.memblock);
670
671 pa_xfree(s->name);
672 pa_xfree(s->driver);
673
674 if (s->proplist)
675 pa_proplist_free(s->proplist);
676
677 if (s->ports)
678 pa_hashmap_free(s->ports);
679
680 pa_xfree(s);
681 }
682
683 /* Called from main context, and not while the IO thread is active, please */
684 void pa_source_set_asyncmsgq(pa_source *s, pa_asyncmsgq *q) {
685 pa_source_assert_ref(s);
686 pa_assert_ctl_context();
687
688 s->asyncmsgq = q;
689 }
690
691 /* Called from main context, and not while the IO thread is active, please */
692 void pa_source_update_flags(pa_source *s, pa_source_flags_t mask, pa_source_flags_t value) {
693 pa_source_flags_t old_flags;
694 pa_source_output *output;
695 uint32_t idx;
696
697 pa_source_assert_ref(s);
698 pa_assert_ctl_context();
699
700 /* For now, allow only a minimal set of flags to be changed. */
701 pa_assert((mask & ~(PA_SOURCE_DYNAMIC_LATENCY|PA_SOURCE_LATENCY)) == 0);
702
703 old_flags = s->flags;
704 s->flags = (s->flags & ~mask) | (value & mask);
705
706 if (s->flags == old_flags)
707 return;
708
709 if ((s->flags & PA_SOURCE_LATENCY) != (old_flags & PA_SOURCE_LATENCY))
710 pa_log_debug("Source %s: LATENCY flag %s.", s->name, (s->flags & PA_SOURCE_LATENCY) ? "enabled" : "disabled");
711
712 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY) != (old_flags & PA_SOURCE_DYNAMIC_LATENCY))
713 pa_log_debug("Source %s: DYNAMIC_LATENCY flag %s.",
714 s->name, (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ? "enabled" : "disabled");
715
716 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
717 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_FLAGS_CHANGED], s);
718
719 PA_IDXSET_FOREACH(output, s->outputs, idx) {
720 if (output->destination_source)
721 pa_source_update_flags(output->destination_source, mask, value);
722 }
723 }
724
725 /* Called from IO context, or before _put() from main context */
726 void pa_source_set_rtpoll(pa_source *s, pa_rtpoll *p) {
727 pa_source_assert_ref(s);
728 pa_source_assert_io_context(s);
729
730 s->thread_info.rtpoll = p;
731 }
732
733 /* Called from main context */
734 int pa_source_update_status(pa_source*s) {
735 pa_source_assert_ref(s);
736 pa_assert_ctl_context();
737 pa_assert(PA_SOURCE_IS_LINKED(s->state));
738
739 if (s->state == PA_SOURCE_SUSPENDED)
740 return 0;
741
742 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
743 }
744
745 /* Called from any context - must be threadsafe */
746 void pa_source_set_mixer_dirty(pa_source *s, bool is_dirty) {
747 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
748 }
749
750 /* Called from main context */
751 int pa_source_suspend(pa_source *s, bool suspend, pa_suspend_cause_t cause) {
752 pa_source_assert_ref(s);
753 pa_assert_ctl_context();
754 pa_assert(PA_SOURCE_IS_LINKED(s->state));
755 pa_assert(cause != 0);
756
757 if (s->monitor_of && cause != PA_SUSPEND_PASSTHROUGH)
758 return -PA_ERR_NOTSUPPORTED;
759
760 if (suspend)
761 s->suspend_cause |= cause;
762 else
763 s->suspend_cause &= ~cause;
764
765 if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
766 /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
767 it'll be handled just fine. */
768 pa_source_set_mixer_dirty(s, false);
769 pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
770 if (s->active_port && s->set_port) {
771 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
772 struct source_message_set_port msg = { .port = s->active_port, .ret = 0 };
773 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
774 }
775 else
776 s->set_port(s, s->active_port);
777 }
778 else {
779 if (s->set_mute)
780 s->set_mute(s);
781 if (s->set_volume)
782 s->set_volume(s);
783 }
784 }
785
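    /* If the suspend state already matches the (possibly updated) cause mask,
     * there is nothing further to do. */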
786 if ((pa_source_get_state(s) == PA_SOURCE_SUSPENDED) == !!s->suspend_cause)
787 return 0;
788
789 pa_log_debug("Suspend cause of source %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
790
791 if (s->suspend_cause)
792 return source_set_state(s, PA_SOURCE_SUSPENDED);
793 else
794 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
795 }
796
797 /* Called from main context */
798 int pa_source_sync_suspend(pa_source *s) {
799 pa_sink_state_t state;
800
801 pa_source_assert_ref(s);
802 pa_assert_ctl_context();
803 pa_assert(PA_SOURCE_IS_LINKED(s->state));
804 pa_assert(s->monitor_of);
805
806 state = pa_sink_get_state(s->monitor_of);
807
808 if (state == PA_SINK_SUSPENDED)
809 return source_set_state(s, PA_SOURCE_SUSPENDED);
810
811 pa_assert(PA_SINK_IS_OPENED(state));
812
813 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
814 }
815
816 /* Called from main context */
817 pa_queue *pa_source_move_all_start(pa_source *s, pa_queue *q) {
818 pa_source_output *o, *n;
819 uint32_t idx;
820
821 pa_source_assert_ref(s);
822 pa_assert_ctl_context();
823 pa_assert(PA_SOURCE_IS_LINKED(s->state));
824
825 if (!q)
826 q = pa_queue_new();
827
828 for (o = PA_SOURCE_OUTPUT(pa_idxset_first(s->outputs, &idx)); o; o = n) {
829 n = PA_SOURCE_OUTPUT(pa_idxset_next(s->outputs, &idx));
830
831 pa_source_output_ref(o);
832
833 if (pa_source_output_start_move(o) >= 0)
834 pa_queue_push(q, o);
835 else
836 pa_source_output_unref(o);
837 }
838
839 return q;
840 }
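/* Typical pattern: pa_source_move_all_start() to detach the outputs, then,
 * after reconfiguring or unlinking the source, pa_source_move_all_finish() on
 * the new source, or pa_source_move_all_fail() if no new home could be found. */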
841
842 /* Called from main context */
843 void pa_source_move_all_finish(pa_source *s, pa_queue *q, bool save) {
844 pa_source_output *o;
845
846 pa_source_assert_ref(s);
847 pa_assert_ctl_context();
848 pa_assert(PA_SOURCE_IS_LINKED(s->state));
849 pa_assert(q);
850
851 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
852 if (pa_source_output_finish_move(o, s, save) < 0)
853 pa_source_output_fail_move(o);
854
855 pa_source_output_unref(o);
856 }
857
858 pa_queue_free(q, NULL);
859 }
860
861 /* Called from main context */
862 void pa_source_move_all_fail(pa_queue *q) {
863 pa_source_output *o;
864
865 pa_assert_ctl_context();
866 pa_assert(q);
867
868 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
869 pa_source_output_fail_move(o);
870 pa_source_output_unref(o);
871 }
872
873 pa_queue_free(q, NULL);
874 }
875
876 /* Called from IO thread context */
877 void pa_source_process_rewind(pa_source *s, size_t nbytes) {
878 pa_source_output *o;
879 void *state = NULL;
880
881 pa_source_assert_ref(s);
882 pa_source_assert_io_context(s);
883 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
884
885 if (nbytes <= 0)
886 return;
887
888 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
889 return;
890
891 pa_log_debug("Processing rewind...");
892
893 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
894 pa_source_output_assert_ref(o);
895 pa_source_output_process_rewind(o, nbytes);
896 }
897 }
898
899 /* Called from IO thread context */
900 void pa_source_post(pa_source*s, const pa_memchunk *chunk) {
901 pa_source_output *o;
902 void *state = NULL;
903
904 pa_source_assert_ref(s);
905 pa_source_assert_io_context(s);
906 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
907 pa_assert(chunk);
908
909 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
910 return;
911
912 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
913 pa_memchunk vchunk = *chunk;
914
915 pa_memblock_ref(vchunk.memblock);
916 pa_memchunk_make_writable(&vchunk, 0);
917
918 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
919 pa_silence_memchunk(&vchunk, &s->sample_spec);
920 else
921 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
922
923 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
924 pa_source_output_assert_ref(o);
925
926 if (!o->thread_info.direct_on_input)
927 pa_source_output_push(o, &vchunk);
928 }
929
930 pa_memblock_unref(vchunk.memblock);
931 } else {
932
933 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
934 pa_source_output_assert_ref(o);
935
936 if (!o->thread_info.direct_on_input)
937 pa_source_output_push(o, chunk);
938 }
939 }
940 }
941
942 /* Called from IO thread context */
943 void pa_source_post_direct(pa_source*s, pa_source_output *o, const pa_memchunk *chunk) {
944 pa_source_assert_ref(s);
945 pa_source_assert_io_context(s);
946 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
947 pa_source_output_assert_ref(o);
948 pa_assert(o->thread_info.direct_on_input);
949 pa_assert(chunk);
950
951 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
952 return;
953
954 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
955 pa_memchunk vchunk = *chunk;
956
957 pa_memblock_ref(vchunk.memblock);
958 pa_memchunk_make_writable(&vchunk, 0);
959
960 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
961 pa_silence_memchunk(&vchunk, &s->sample_spec);
962 else
963 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
964
965 pa_source_output_push(o, &vchunk);
966
967 pa_memblock_unref(vchunk.memblock);
968 } else
969 pa_source_output_push(o, chunk);
970 }
971
972 /* Called from main thread */
973 int pa_source_update_rate(pa_source *s, uint32_t rate, bool passthrough) {
974 int ret;
975 uint32_t desired_rate = rate;
976 uint32_t default_rate = s->default_sample_rate;
977 uint32_t alternate_rate = s->alternate_sample_rate;
978 bool use_alternate = false;
979
980 if (rate == s->sample_spec.rate)
981 return 0;
982
983 if (!s->update_rate && !s->monitor_of)
984 return -1;
985
986 if (PA_UNLIKELY(default_rate == alternate_rate && !passthrough)) {
987 pa_log_debug("Default and alternate sample rates are the same.");
988 return -1;
989 }
990
991 if (PA_SOURCE_IS_RUNNING(s->state)) {
992 pa_log_info("Cannot update rate, SOURCE_IS_RUNNING, will keep using %u Hz",
993 s->sample_spec.rate);
994 return -1;
995 }
996
997 if (s->monitor_of) {
998 if (PA_SINK_IS_RUNNING(s->monitor_of->state)) {
999 pa_log_info("Cannot update rate, this is a monitor source and the sink is running.");
1000 return -1;
1001 }
1002 }
1003
1004 if (PA_UNLIKELY(!pa_sample_rate_valid(desired_rate)))
1005 return -1;
1006
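    /* Example: with a default of 44100 Hz and an alternate of 48000 Hz, a
     * request for 96000 Hz switches us to the 48000 Hz family, while a
     * 22050 Hz request keeps the default rate. */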
1007 if (!passthrough) {
1008 pa_assert((default_rate % 4000 == 0) || (default_rate % 11025 == 0));
1009 pa_assert((alternate_rate % 4000 == 0) || (alternate_rate % 11025 == 0));
1010
1011 if (default_rate % 11025 == 0) {
1012 if ((alternate_rate % 4000 == 0) && (desired_rate % 4000 == 0))
1013 use_alternate=true;
1014 } else {
1015 /* the default rate is a multiple of 4000 */
1016 if ((alternate_rate % 11025 == 0) && (desired_rate % 11025 == 0))
1017 use_alternate=true;
1018 }
1019
1020 if (use_alternate)
1021 desired_rate = alternate_rate;
1022 else
1023 desired_rate = default_rate;
1024 } else {
1025 desired_rate = rate; /* use stream sampling rate, discard default/alternate settings */
1026 }
1027
1028 if (desired_rate == s->sample_spec.rate)
1029 return -1;
1030
1031 if (!passthrough && pa_source_used_by(s) > 0)
1032 return -1;
1033
1034 pa_log_debug("Suspending source %s due to changing the sample rate.", s->name);
1035 pa_source_suspend(s, true, PA_SUSPEND_INTERNAL);
1036
1037 if (s->update_rate)
1038 ret = s->update_rate(s, desired_rate);
1039 else {
1040 /* This is a monitor source. */
1041
1042 /* XXX: This code is written with non-passthrough streams in mind. I
1043 * have no idea whether the behaviour with passthrough streams is
1044 * sensible. */
1045 if (!passthrough) {
1046 uint32_t old_rate = s->sample_spec.rate;
1047
1048 s->sample_spec.rate = desired_rate;
1049 ret = pa_sink_update_rate(s->monitor_of, desired_rate, false);
1050
1051 if (ret < 0) {
1052 /* Changing the sink rate failed, roll back the old rate for
1053 * the monitor source. Why did we set the source rate before
1054 * calling pa_sink_update_rate(), you may ask. The reason is
1055 * that pa_sink_update_rate() tries to update the monitor
1056 * source rate, but we are already in the process of updating
1057 * the monitor source rate, so there's a risk of entering an
1058 * infinite loop. Setting the source rate before calling
1059 * pa_sink_update_rate() makes the rate == s->sample_spec.rate
1060 * check in the beginning of this function return early, so we
1061 * avoid looping. */
1062 s->sample_spec.rate = old_rate;
1063 }
1064 } else
1065 ret = -1;
1066 }
1067
1068 if (ret >= 0) {
1069 uint32_t idx;
1070 pa_source_output *o;
1071
1072 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1073 if (o->state == PA_SOURCE_OUTPUT_CORKED)
1074 pa_source_output_update_rate(o);
1075 }
1076
1077 pa_log_info("Changed sampling rate successfully");
1078 }
1079
1080 pa_source_suspend(s, false, PA_SUSPEND_INTERNAL);
1081
1082 return ret;
1083 }
1084
1085 /* Called from main thread */
1086 pa_usec_t pa_source_get_latency(pa_source *s) {
1087 pa_usec_t usec;
1088
1089 pa_source_assert_ref(s);
1090 pa_assert_ctl_context();
1091 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1092
1093 if (s->state == PA_SOURCE_SUSPENDED)
1094 return 0;
1095
1096 if (!(s->flags & PA_SOURCE_LATENCY))
1097 return 0;
1098
1099 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1100
1101 /* usec is unsigned, so check that the offset can be added to usec without
1102 * underflowing. */
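    /* (For example, a -20 ms offset against a reported 5 ms latency clamps
     * the result to 0.) */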
1103 if (-s->latency_offset <= (int64_t) usec)
1104 usec += s->latency_offset;
1105 else
1106 usec = 0;
1107
1108 return usec;
1109 }
1110
1111 /* Called from IO thread */
1112 pa_usec_t pa_source_get_latency_within_thread(pa_source *s) {
1113 pa_usec_t usec = 0;
1114 pa_msgobject *o;
1115
1116 pa_source_assert_ref(s);
1117 pa_source_assert_io_context(s);
1118 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
1119
1120 /* The returned value is supposed to be in the time domain of the sound card! */
1121
1122 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
1123 return 0;
1124
1125 if (!(s->flags & PA_SOURCE_LATENCY))
1126 return 0;
1127
1128 o = PA_MSGOBJECT(s);
1129
1130 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1131
1132 if (o->process_msg(o, PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1133 return -1;
1134
1135 /* usec is unsigned, so check that the offset can be added to usec without
1136 * underflowing. */
1137 if (-s->thread_info.latency_offset <= (int64_t) usec)
1138 usec += s->thread_info.latency_offset;
1139 else
1140 usec = 0;
1141
1142 return usec;
1143 }
1144
1145 /* Called from the main thread (and also from the IO thread while the main
1146 * thread is waiting).
1147 *
1148 * When a source uses volume sharing, it never has the PA_SOURCE_FLAT_VOLUME flag
1149 * set. Instead, flat volume mode is detected by checking whether the root source
1150 * has the flag set. */
1151 bool pa_source_flat_volume_enabled(pa_source *s) {
1152 pa_source_assert_ref(s);
1153
1154 s = pa_source_get_master(s);
1155
1156 if (PA_LIKELY(s))
1157 return (s->flags & PA_SOURCE_FLAT_VOLUME);
1158 else
1159 return false;
1160 }
1161
1162 /* Called from the main thread (and also from the IO thread while the main
1163 * thread is waiting). */
1164 pa_source *pa_source_get_master(pa_source *s) {
1165 pa_source_assert_ref(s);
1166
1167 while (s && (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1168 if (PA_UNLIKELY(!s->output_from_master))
1169 return NULL;
1170
1171 s = s->output_from_master->source;
1172 }
1173
1174 return s;
1175 }
1176
1177 /* Called from main context */
1178 bool pa_source_is_passthrough(pa_source *s) {
1179
1180 pa_source_assert_ref(s);
1181
1182 /* NB Currently only monitor sources support passthrough mode */
1183 return (s->monitor_of && pa_sink_is_passthrough(s->monitor_of));
1184 }
1185
1186 /* Called from main context */
1187 void pa_source_enter_passthrough(pa_source *s) {
1188 pa_cvolume volume;
1189
1190 /* set the volume to NORM */
1191 s->saved_volume = *pa_source_get_volume(s, true);
1192 s->saved_save_volume = s->save_volume;
1193
1194 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1195 pa_source_set_volume(s, &volume, true, false);
1196 }
1197
1198 /* Called from main context */
1199 void pa_source_leave_passthrough(pa_source *s) {
1200 /* Restore source volume to what it was before we entered passthrough mode */
1201 pa_source_set_volume(s, &s->saved_volume, true, s->saved_save_volume);
1202
1203 pa_cvolume_init(&s->saved_volume);
1204 s->saved_save_volume = false;
1205 }
1206
1207 /* Called from main context. */
1208 static void compute_reference_ratio(pa_source_output *o) {
1209 unsigned c = 0;
1210 pa_cvolume remapped;
1211
1212 pa_assert(o);
1213 pa_assert(pa_source_flat_volume_enabled(o->source));
1214
1215 /*
1216 * Calculates the reference ratio from the source's reference
1217 * volume. This basically calculates:
1218 *
1219 * o->reference_ratio = o->volume / o->source->reference_volume
1220 */
1221
1222 remapped = o->source->reference_volume;
1223 pa_cvolume_remap(&remapped, &o->source->channel_map, &o->channel_map);
1224
1225 o->reference_ratio.channels = o->sample_spec.channels;
1226
1227 for (c = 0; c < o->sample_spec.channels; c++) {
1228
1229 /* We don't update when the source volume is 0 anyway */
1230 if (remapped.values[c] <= PA_VOLUME_MUTED)
1231 continue;
1232
1233 /* Don't update the reference ratio unless necessary */
1234 if (pa_sw_volume_multiply(
1235 o->reference_ratio.values[c],
1236 remapped.values[c]) == o->volume.values[c])
1237 continue;
1238
1239 o->reference_ratio.values[c] = pa_sw_volume_divide(
1240 o->volume.values[c],
1241 remapped.values[c]);
1242 }
1243 }
1244
1245 /* Called from main context. Only called for the root source in volume sharing
1246 * cases, except for internal recursive calls. */
1247 static void compute_reference_ratios(pa_source *s) {
1248 uint32_t idx;
1249 pa_source_output *o;
1250
1251 pa_source_assert_ref(s);
1252 pa_assert_ctl_context();
1253 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1254 pa_assert(pa_source_flat_volume_enabled(s));
1255
1256 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1257 compute_reference_ratio(o);
1258
1259 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1260 compute_reference_ratios(o->destination_source);
1261 }
1262 }
1263
1264 /* Called from main context. Only called for the root source in volume sharing
1265 * cases, except for internal recursive calls. */
1266 static void compute_real_ratios(pa_source *s) {
1267 pa_source_output *o;
1268 uint32_t idx;
1269
1270 pa_source_assert_ref(s);
1271 pa_assert_ctl_context();
1272 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1273 pa_assert(pa_source_flat_volume_enabled(s));
1274
1275 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1276 unsigned c;
1277 pa_cvolume remapped;
1278
1279 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1280 /* The origin source uses volume sharing, so this output's real ratio
1281 * is handled as a special case - the real ratio must be 0 dB, and
1282 * as a result o->soft_volume must equal o->volume_factor. */
1283 pa_cvolume_reset(&o->real_ratio, o->real_ratio.channels);
1284 o->soft_volume = o->volume_factor;
1285
1286 compute_real_ratios(o->destination_source);
1287
1288 continue;
1289 }
1290
1291 /*
1292 * This basically calculates:
1293 *
1294 * o->real_ratio := o->volume / s->real_volume
1295 * o->soft_volume := o->real_ratio * o->volume_factor
1296 */
1297
1298 remapped = s->real_volume;
1299 pa_cvolume_remap(&remapped, &s->channel_map, &o->channel_map);
1300
1301 o->real_ratio.channels = o->sample_spec.channels;
1302 o->soft_volume.channels = o->sample_spec.channels;
1303
1304 for (c = 0; c < o->sample_spec.channels; c++) {
1305
1306 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1307 /* We leave o->real_ratio untouched */
1308 o->soft_volume.values[c] = PA_VOLUME_MUTED;
1309 continue;
1310 }
1311
1312 /* Don't lose accuracy unless necessary */
1313 if (pa_sw_volume_multiply(
1314 o->real_ratio.values[c],
1315 remapped.values[c]) != o->volume.values[c])
1316
1317 o->real_ratio.values[c] = pa_sw_volume_divide(
1318 o->volume.values[c],
1319 remapped.values[c]);
1320
1321 o->soft_volume.values[c] = pa_sw_volume_multiply(
1322 o->real_ratio.values[c],
1323 o->volume_factor.values[c]);
1324 }
1325
1326 /* We don't copy the soft_volume to the thread_info data
1327 * here. That must be done by the caller */
1328 }
1329 }
1330
1331 static pa_cvolume *cvolume_remap_minimal_impact(
1332 pa_cvolume *v,
1333 const pa_cvolume *template,
1334 const pa_channel_map *from,
1335 const pa_channel_map *to) {
1336
1337 pa_cvolume t;
1338
1339 pa_assert(v);
1340 pa_assert(template);
1341 pa_assert(from);
1342 pa_assert(to);
1343 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1344 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1345
1346 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1347 * mapping from source output to source volumes:
1348 *
1349 * If template is a possible remapping from v it is used instead
1350 * of remapping anew.
1351 *
1352 * If the channel maps don't match we set an all-channel volume on
1353 * the source to ensure that changing a volume on one stream has no
1354 * effect that cannot be compensated for in another stream that
1355 * does not have the same channel map as the source. */
1356
1357 if (pa_channel_map_equal(from, to))
1358 return v;
1359
1360 t = *template;
1361 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1362 *v = *template;
1363 return v;
1364 }
1365
1366 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1367 return v;
1368 }
1369
1370 /* Called from main thread. Only called for the root source in volume sharing
1371 * cases, except for internal recursive calls. */
1372 static void get_maximum_output_volume(pa_source *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1373 pa_source_output *o;
1374 uint32_t idx;
1375
1376 pa_source_assert_ref(s);
1377 pa_assert(max_volume);
1378 pa_assert(channel_map);
1379 pa_assert(pa_source_flat_volume_enabled(s));
1380
1381 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1382 pa_cvolume remapped;
1383
1384 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1385 get_maximum_output_volume(o->destination_source, max_volume, channel_map);
1386
1387 /* Ignore this output. The origin source uses volume sharing, so this
1388 * output's volume will be set to be equal to the root source's real
1389 * volume. Obviously this output's current volume must not then
1390 * affect what the root source's real volume will be. */
1391 continue;
1392 }
1393
1394 remapped = o->volume;
1395 cvolume_remap_minimal_impact(&remapped, max_volume, &o->channel_map, channel_map);
1396 pa_cvolume_merge(max_volume, max_volume, &remapped);
1397 }
1398 }
1399
1400 /* Called from main thread. Only called for the root source in volume sharing
1401 * cases, except for internal recursive calls. */
1402 static bool has_outputs(pa_source *s) {
1403 pa_source_output *o;
1404 uint32_t idx;
1405
1406 pa_source_assert_ref(s);
1407
1408 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1409 if (!o->destination_source || !(o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || has_outputs(o->destination_source))
1410 return true;
1411 }
1412
1413 return false;
1414 }
1415
1416 /* Called from main thread. Only called for the root source in volume sharing
1417 * cases, except for internal recursive calls. */
1418 static void update_real_volume(pa_source *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1419 pa_source_output *o;
1420 uint32_t idx;
1421
1422 pa_source_assert_ref(s);
1423 pa_assert(new_volume);
1424 pa_assert(channel_map);
1425
1426 s->real_volume = *new_volume;
1427 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1428
1429 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1430 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1431 if (pa_source_flat_volume_enabled(s)) {
1432 pa_cvolume old_volume = o->volume;
1433
1434 /* Follow the root source's real volume. */
1435 o->volume = *new_volume;
1436 pa_cvolume_remap(&o->volume, channel_map, &o->channel_map);
1437 compute_reference_ratio(o);
1438
1439 /* The volume changed, let's tell people so */
1440 if (!pa_cvolume_equal(&old_volume, &o->volume)) {
1441 if (o->volume_changed)
1442 o->volume_changed(o);
1443
1444 pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
1445 }
1446 }
1447
1448 update_real_volume(o->destination_source, new_volume, channel_map);
1449 }
1450 }
1451 }
1452
1453 /* Called from main thread. Only called for the root source in shared volume
1454 * cases. */
1455 static void compute_real_volume(pa_source *s) {
1456 pa_source_assert_ref(s);
1457 pa_assert_ctl_context();
1458 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1459 pa_assert(pa_source_flat_volume_enabled(s));
1460 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1461
1462 /* This determines the maximum volume of all streams and sets
1463 * s->real_volume accordingly. */
1464
1465 if (!has_outputs(s)) {
1466 /* In the special case that we have no source outputs we leave the
1467 * volume unmodified. */
1468 update_real_volume(s, &s->reference_volume, &s->channel_map);
1469 return;
1470 }
1471
1472 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1473
1474 /* First let's determine the new maximum volume of all outputs
1475 * connected to this source */
1476 get_maximum_output_volume(s, &s->real_volume, &s->channel_map);
1477 update_real_volume(s, &s->real_volume, &s->channel_map);
1478
1479 /* Then, let's update the real ratios/soft volumes of all outputs
1480 * connected to this source */
1481 compute_real_ratios(s);
1482 }
1483
1484 /* Called from main thread. Only called for the root source in shared volume
1485 * cases, except for internal recursive calls. */
1486 static void propagate_reference_volume(pa_source *s) {
1487 pa_source_output *o;
1488 uint32_t idx;
1489
1490 pa_source_assert_ref(s);
1491 pa_assert_ctl_context();
1492 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1493 pa_assert(pa_source_flat_volume_enabled(s));
1494
1495 /* This is called whenever the source volume changes that is not
1496 * caused by a source output volume change. We need to fix up the
1497 * source output volumes accordingly */
1498
1499 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1500 pa_cvolume old_volume;
1501
1502 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1503 propagate_reference_volume(o->destination_source);
1504
1505 /* Since the origin source uses volume sharing, this output's volume
1506 * needs to be updated to match the root source's real volume, but
1507 * that will be done later in update_shared_real_volume(). */
1508 continue;
1509 }
1510
1511 old_volume = o->volume;
1512
1513 /* This basically calculates:
1514 *
1515 * o->volume := o->reference_volume * o->reference_ratio */
1516
1517 o->volume = s->reference_volume;
1518 pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
1519 pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);
1520
1521 /* The volume changed, let's tell people so */
1522 if (!pa_cvolume_equal(&old_volume, &o->volume)) {
1523
1524 if (o->volume_changed)
1525 o->volume_changed(o);
1526
1527 pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
1528 }
1529 }
1530 }
1531
1532 /* Called from main thread. Only called for the root source in volume sharing
1533 * cases, except for internal recursive calls. The return value indicates
1534 * whether any reference volume actually changed. */
1535 static bool update_reference_volume(pa_source *s, const pa_cvolume *v, const pa_channel_map *channel_map, bool save) {
1536 pa_cvolume volume;
1537 bool reference_volume_changed;
1538 pa_source_output *o;
1539 uint32_t idx;
1540
1541 pa_source_assert_ref(s);
1542 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1543 pa_assert(v);
1544 pa_assert(channel_map);
1545 pa_assert(pa_cvolume_valid(v));
1546
1547 volume = *v;
1548 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
1549
1550 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
1551 pa_source_set_reference_volume_direct(s, &volume);
1552
1553 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
1554
1555 if (!reference_volume_changed && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1556 /* If the root source's volume doesn't change, then there can't be any
1557 * changes in the other sources in the source tree either.
1558 *
1559 * It's probably theoretically possible that even if the root source's
1560 * volume changes slightly, some filter source doesn't change its volume
1561 * due to rounding errors. If that happens, we still want to propagate
1562 * the changed root source volume to the sources connected to the
1563 * intermediate source that didn't change its volume. This theoretical
1564 * possibility is the reason why we have that !(s->flags &
1565 * PA_SOURCE_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
1566 * notice even if we always returned false here when
1567 * reference_volume_changed is false. */
1568 return false;
1569
1570 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1571 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1572 update_reference_volume(o->destination_source, v, channel_map, false);
1573 }
1574
1575 return true;
1576 }
1577
1578 /* Called from main thread */
1579 void pa_source_set_volume(
1580 pa_source *s,
1581 const pa_cvolume *volume,
1582 bool send_msg,
1583 bool save) {
1584
1585 pa_cvolume new_reference_volume;
1586 pa_source *root_source;
1587
1588 pa_source_assert_ref(s);
1589 pa_assert_ctl_context();
1590 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1591 pa_assert(!volume || pa_cvolume_valid(volume));
1592 pa_assert(volume || pa_source_flat_volume_enabled(s));
1593 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1594
1595 /* make sure we don't change the volume in PASSTHROUGH mode ...
1596 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
1597 if (pa_source_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
1598 pa_log_warn("Cannot change volume, source is monitor of a PASSTHROUGH sink");
1599 return;
1600 }
1601
1602 /* In case of volume sharing, the volume is set for the root source first,
1603 * from which it's then propagated to the sharing sources. */
1604 root_source = pa_source_get_master(s);
1605
1606 if (PA_UNLIKELY(!root_source))
1607 return;
1608
1609 /* As a special exception we accept mono volumes on all sources --
1610 * even on those with more complex channel maps */
1611
1612 if (volume) {
1613 if (pa_cvolume_compatible(volume, &s->sample_spec))
1614 new_reference_volume = *volume;
1615 else {
1616 new_reference_volume = s->reference_volume;
1617 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
1618 }
1619
1620 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
1621
1622 if (update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save)) {
1623 if (pa_source_flat_volume_enabled(root_source)) {
1624 /* OK, propagate this volume change back to the outputs */
1625 propagate_reference_volume(root_source);
1626
1627 /* And now recalculate the real volume */
1628 compute_real_volume(root_source);
1629 } else
1630 update_real_volume(root_source, &root_source->reference_volume, &root_source->channel_map);
1631 }
1632
1633 } else {
1634 /* If volume is NULL we synchronize the source's real and
1635 * reference volumes with the stream volumes. */
1636
1637 pa_assert(pa_source_flat_volume_enabled(root_source));
1638
1639 /* Ok, let's determine the new real volume */
1640 compute_real_volume(root_source);
1641
1642 /* Let's 'push' the reference volume if necessary */
1643 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_source->real_volume);
1644 /* If the source and its root don't have the same number of channels, we need to remap */
1645 if (s != root_source && !pa_channel_map_equal(&s->channel_map, &root_source->channel_map))
1646 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
1647 update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save);
1648
1649 /* Now that the reference volume is updated, we can update the streams'
1650 * reference ratios. */
1651 compute_reference_ratios(root_source);
1652 }
1653
1654 if (root_source->set_volume) {
1655 /* If we have a function set_volume(), then we do not apply a
1656 * soft volume by default. However, set_volume() is free to
1657 * apply one to root_source->soft_volume */
1658
1659 pa_cvolume_reset(&root_source->soft_volume, root_source->sample_spec.channels);
1660 if (!(root_source->flags & PA_SOURCE_DEFERRED_VOLUME))
1661 root_source->set_volume(root_source);
1662
1663 } else
1664 /* If we have no function set_volume(), then the soft volume
1665 * becomes the real volume */
1666 root_source->soft_volume = root_source->real_volume;
1667
1668 /* This tells the source that soft volume and/or real volume changed */
1669 if (send_msg)
1670 pa_assert_se(pa_asyncmsgq_send(root_source->asyncmsgq, PA_MSGOBJECT(root_source), PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
1671 }
1672
1673 /* Called from the IO thread if sync volume is used, otherwise from the main thread.
1674 * Only to be called by the source implementor */
1675 void pa_source_set_soft_volume(pa_source *s, const pa_cvolume *volume) {
1676
1677 pa_source_assert_ref(s);
1678 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1679
1680 if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
1681 pa_source_assert_io_context(s);
1682 else
1683 pa_assert_ctl_context();
1684
1685 if (!volume)
1686 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1687 else
1688 s->soft_volume = *volume;
1689
1690 if (PA_SOURCE_IS_LINKED(s->state) && !(s->flags & PA_SOURCE_DEFERRED_VOLUME))
1691 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1692 else
1693 s->thread_info.soft_volume = s->soft_volume;
1694 }
1695
1696 /* Called from the main thread. Only called for the root source in volume sharing
1697 * cases, except for internal recursive calls. */
1698 static void propagate_real_volume(pa_source *s, const pa_cvolume *old_real_volume) {
1699 pa_source_output *o;
1700 uint32_t idx;
1701
1702 pa_source_assert_ref(s);
1703 pa_assert(old_real_volume);
1704 pa_assert_ctl_context();
1705 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1706
1707 /* This is called when the hardware's real volume changes due to
1708 * some external event. We copy the real volume into our
1709 * reference volume and then rebuild the stream volumes based on
1710 * o->real_ratio which should stay fixed. */
1711
1712 if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1713 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1714 return;
1715
1716 /* 1. Make the real volume the reference volume */
1717 update_reference_volume(s, &s->real_volume, &s->channel_map, true);
1718 }
1719
1720 if (pa_source_flat_volume_enabled(s)) {
1721
1722 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1723 pa_cvolume old_volume = o->volume;
1724
1725 /* 2. Since the source's reference and real volumes are equal
1726 * now our ratios should be too. */
1727 o->reference_ratio = o->real_ratio;
1728
1729 /* 3. Recalculate the new stream reference volume based on the
1730 * reference ratio and the source's reference volume.
1731 *
1732 * This basically calculates:
1733 *
1734 * o->volume = s->reference_volume * o->reference_ratio
1735 *
1736 * This is identical to propagate_reference_volume() */
1737 o->volume = s->reference_volume;
1738 pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
1739 pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);
1740
1741 /* Notify if something changed */
1742 if (!pa_cvolume_equal(&old_volume, &o->volume)) {
1743
1744 if (o->volume_changed)
1745 o->volume_changed(o);
1746
1747 pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
1748 }
1749
1750 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1751 propagate_real_volume(o->destination_source, old_real_volume);
1752 }
1753 }
1754
1755 /* Something changed in the hardware. It probably makes sense to
1756 * save the changed hw settings, given that hw volume changes not
1757 * triggered by PA are almost certainly done by the user. */
1758 if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1759 s->save_volume = true;
1760 }
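
/* Worked example (illustrative): if the hardware reports a new real volume of
 * 60% and an output's real_ratio is 0.5, step 1 turns 60% into the new
 * reference volume, step 2 copies the 0.5 ratio into reference_ratio, and
 * step 3 yields an output reference volume of roughly 60% * 0.5 = 30%, so the
 * stream keeps its position relative to the device volume. */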
1761
1762 /* Called from io thread */
1763 void pa_source_update_volume_and_mute(pa_source *s) {
1764 pa_assert(s);
1765 pa_source_assert_io_context(s);
1766
1767 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
1768 }
1769
1770 /* Called from main thread */
1771 const pa_cvolume *pa_source_get_volume(pa_source *s, bool force_refresh) {
1772 pa_source_assert_ref(s);
1773 pa_assert_ctl_context();
1774 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1775
1776 if (s->refresh_volume || force_refresh) {
1777 struct pa_cvolume old_real_volume;
1778
1779 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1780
1781 old_real_volume = s->real_volume;
1782
1783 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume)
1784 s->get_volume(s);
1785
1786 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1787
1788 update_real_volume(s, &s->real_volume, &s->channel_map);
1789 propagate_real_volume(s, &old_real_volume);
1790 }
1791
1792 return &s->reference_volume;
1793 }
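
/* A minimal usage sketch (illustrative), reading and logging the reference
 * volume from the main thread without forcing a hardware refresh:
 *
 *     char buf[PA_CVOLUME_SNPRINT_MAX];
 *     const pa_cvolume *v = pa_source_get_volume(s, false);
 *
 *     pa_log_info("Reference volume: %s", pa_cvolume_snprint(buf, sizeof(buf), v));
 */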
1794
1795 /* Called from main thread. In volume sharing cases, only the root source may
1796 * call this. */
1797 void pa_source_volume_changed(pa_source *s, const pa_cvolume *new_real_volume) {
1798 pa_cvolume old_real_volume;
1799
1800 pa_source_assert_ref(s);
1801 pa_assert_ctl_context();
1802 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1803 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1804
1805 /* The source implementor may call this if the volume changed to make sure everyone is notified */
1806
1807 old_real_volume = s->real_volume;
1808 update_real_volume(s, new_real_volume, &s->channel_map);
1809 propagate_real_volume(s, &old_real_volume);
1810 }
1811
1812 /* Called from main thread */
1813 void pa_source_set_mute(pa_source *s, bool mute, bool save) {
1814 bool old_muted;
1815
1816 pa_source_assert_ref(s);
1817 pa_assert_ctl_context();
1818 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1819
1820 old_muted = s->muted;
1821 s->muted = mute;
1822 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1823
1824 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->set_mute)
1825 s->set_mute(s);
1826
1827 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1828
1829 if (old_muted != s->muted)
1830 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1831 }
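
/* A minimal usage sketch (illustrative), muting the default source and saving
 * the change, assuming `core` is the pa_core pointer available to the caller:
 *
 *     pa_source *def = pa_namereg_get(core, NULL, PA_NAMEREG_SOURCE);
 *
 *     if (def)
 *         pa_source_set_mute(def, true, true);
 */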
1832
1833 /* Called from main thread */
1834 bool pa_source_get_mute(pa_source *s, bool force_refresh) {
1835
1836 pa_source_assert_ref(s);
1837 pa_assert_ctl_context();
1838 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1839
1840 if (s->refresh_muted || force_refresh) {
1841 bool old_muted = s->muted;
1842
1843 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_mute)
1844 s->get_mute(s);
1845
1846 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1847
1848 if (old_muted != s->muted) {
1849 s->save_muted = true;
1850
1851 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1852
1853 /* Make sure the soft mute status stays in sync */
1854 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1855 }
1856 }
1857
1858 return s->muted;
1859 }
1860
1861 /* Called from main thread */
1862 void pa_source_mute_changed(pa_source *s, bool new_muted) {
1863 pa_source_assert_ref(s);
1864 pa_assert_ctl_context();
1865 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1866
1867 /* The source implementor may call this if the mute state changed to make sure everyone is notified */
1868
1869 if (s->muted == new_muted)
1870 return;
1871
1872 s->muted = new_muted;
1873 s->save_muted = true;
1874
1875 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1876 }
1877
1878 /* Called from main thread */
1879 bool pa_source_update_proplist(pa_source *s, pa_update_mode_t mode, pa_proplist *p) {
1880 pa_source_assert_ref(s);
1881 pa_assert_ctl_context();
1882
1883 if (p)
1884 pa_proplist_update(s->proplist, mode, p);
1885
1886 if (PA_SOURCE_IS_LINKED(s->state)) {
1887 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1888 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1889 }
1890
1891 return true;
1892 }
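
/* A minimal usage sketch (illustrative), merging one extra property so that
 * the proplist-changed hook and the subscription event fire:
 *
 *     pa_proplist *p = pa_proplist_new();
 *
 *     pa_proplist_sets(p, PA_PROP_DEVICE_ICON_NAME, "audio-input-microphone");
 *     pa_source_update_proplist(s, PA_UPDATE_MERGE, p);
 *     pa_proplist_free(p);
 */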
1893
1894 /* Called from main thread */
1895 /* FIXME -- this should be dropped and be merged into pa_source_update_proplist() */
1896 void pa_source_set_description(pa_source *s, const char *description) {
1897 const char *old;
1898 pa_source_assert_ref(s);
1899 pa_assert_ctl_context();
1900
1901 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1902 return;
1903
1904 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1905
1906 if (old && description && pa_streq(old, description))
1907 return;
1908
1909 if (description)
1910 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1911 else
1912 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1913
1914 if (PA_SOURCE_IS_LINKED(s->state)) {
1915 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1916 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1917 }
1918 }
1919
1920 /* Called from main thread */
1921 unsigned pa_source_linked_by(pa_source *s) {
1922 pa_source_assert_ref(s);
1923 pa_assert_ctl_context();
1924 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1925
1926 return pa_idxset_size(s->outputs);
1927 }
1928
1929 /* Called from main thread */
1930 unsigned pa_source_used_by(pa_source *s) {
1931 unsigned ret;
1932
1933 pa_source_assert_ref(s);
1934 pa_assert_ctl_context();
1935 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1936
1937 ret = pa_idxset_size(s->outputs);
1938 pa_assert(ret >= s->n_corked);
1939
1940 return ret - s->n_corked;
1941 }
1942
1943 /* Called from main thread */
1944 unsigned pa_source_check_suspend(pa_source *s) {
1945 unsigned ret;
1946 pa_source_output *o;
1947 uint32_t idx;
1948
1949 pa_source_assert_ref(s);
1950 pa_assert_ctl_context();
1951
1952 if (!PA_SOURCE_IS_LINKED(s->state))
1953 return 0;
1954
1955 ret = 0;
1956
1957 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1958 pa_source_output_state_t st;
1959
1960 st = pa_source_output_get_state(o);
1961
1962 /* We do not assert here. It is perfectly valid for a source output to
1963 * be in the INIT state (i.e. created, marked done but not yet put)
1964 * and we should not care if it's unlinked as it won't contribute
1965 * towards our busy status.
1966 */
1967 if (!PA_SOURCE_OUTPUT_IS_LINKED(st))
1968 continue;
1969
1970 if (st == PA_SOURCE_OUTPUT_CORKED)
1971 continue;
1972
1973 if (o->flags & PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND)
1974 continue;
1975
1976 ret ++;
1977 }
1978
1979 return ret;
1980 }
1981
1982 /* Called from the IO thread */
1983 static void sync_output_volumes_within_thread(pa_source *s) {
1984 pa_source_output *o;
1985 void *state = NULL;
1986
1987 pa_source_assert_ref(s);
1988 pa_source_assert_io_context(s);
1989
1990 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
1991 if (pa_cvolume_equal(&o->thread_info.soft_volume, &o->soft_volume))
1992 continue;
1993
1994 o->thread_info.soft_volume = o->soft_volume;
1995 //pa_source_output_request_rewind(o, 0, true, false, false);
1996 }
1997 }
1998
1999 /* Called from the IO thread. Only called for the root source in volume sharing
2000 * cases, except for internal recursive calls. */
2001 static void set_shared_volume_within_thread(pa_source *s) {
2002 pa_source_output *o;
2003 void *state = NULL;
2004
2005 pa_source_assert_ref(s);
2006
2007 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
2008
2009 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
2010 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
2011 set_shared_volume_within_thread(o->destination_source);
2012 }
2013 }
2014
2015 /* Called from the IO thread, except for a few messages that are handled in the main thread (e.g. PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE) */
2016 int pa_source_process_msg(pa_msgobject *object, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
2017 pa_source *s = PA_SOURCE(object);
2018 pa_source_assert_ref(s);
2019
2020 switch ((pa_source_message_t) code) {
2021
2022 case PA_SOURCE_MESSAGE_ADD_OUTPUT: {
2023 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
2024
2025 pa_hashmap_put(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index), pa_source_output_ref(o));
2026
2027 if (o->direct_on_input) {
2028 o->thread_info.direct_on_input = o->direct_on_input;
2029 pa_hashmap_put(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index), o);
2030 }
2031
2032 pa_assert(!o->thread_info.attached);
2033 o->thread_info.attached = true;
2034
2035 if (o->attach)
2036 o->attach(o);
2037
2038 pa_source_output_set_state_within_thread(o, o->state);
2039
2040 if (o->thread_info.requested_source_latency != (pa_usec_t) -1)
2041 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
2042
2043 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2044
2045 /* We don't just invalidate the requested latency here,
2046 * because if we are in a move we might need to fix up the
2047 * requested latency. */
2048 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
2049
2050 /* In flat volume mode we need to update the volume as
2051 * well */
2052 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2053 }
2054
2055 case PA_SOURCE_MESSAGE_REMOVE_OUTPUT: {
2056 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
2057
2058 pa_source_output_set_state_within_thread(o, o->state);
2059
2060 if (o->detach)
2061 o->detach(o);
2062
2063 pa_assert(o->thread_info.attached);
2064 o->thread_info.attached = false;
2065
2066 if (o->thread_info.direct_on_input) {
2067 pa_hashmap_remove(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index));
2068 o->thread_info.direct_on_input = NULL;
2069 }
2070
2071 pa_hashmap_remove_and_free(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index));
2072 pa_source_invalidate_requested_latency(s, true);
2073
2074 /* In flat volume mode we need to update the volume as
2075 * well */
2076 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
2077 }
2078
2079 case PA_SOURCE_MESSAGE_SET_SHARED_VOLUME: {
2080 pa_source *root_source = pa_source_get_master(s);
2081
2082 if (PA_LIKELY(root_source))
2083 set_shared_volume_within_thread(root_source);
2084
2085 return 0;
2086 }
2087
2088 case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:
2089
2090 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
2091 s->set_volume(s);
2092 pa_source_volume_change_push(s);
2093 }
2094 /* Fall through ... */
2095
2096 case PA_SOURCE_MESSAGE_SET_VOLUME:
2097
2098 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2099 s->thread_info.soft_volume = s->soft_volume;
2100 }
2101
2102 /* Fall through ... */
2103
2104 case PA_SOURCE_MESSAGE_SYNC_VOLUMES:
2105 sync_output_volumes_within_thread(s);
2106 return 0;
2107
2108 case PA_SOURCE_MESSAGE_GET_VOLUME:
2109
2110 if ((s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume) {
2111 s->get_volume(s);
2112 pa_source_volume_change_flush(s);
2113 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
2114 }
2115
2116 /* In case the source implementor reset the SW volume. */
2117 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
2118 s->thread_info.soft_volume = s->soft_volume;
2119 }
2120
2121 return 0;
2122
2123 case PA_SOURCE_MESSAGE_SET_MUTE:
2124
2125 if (s->thread_info.soft_muted != s->muted) {
2126 s->thread_info.soft_muted = s->muted;
2127 }
2128
2129 if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->set_mute)
2130 s->set_mute(s);
2131
2132 return 0;
2133
2134 case PA_SOURCE_MESSAGE_GET_MUTE:
2135
2136 if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->get_mute)
2137 s->get_mute(s);
2138
2139 return 0;
2140
2141 case PA_SOURCE_MESSAGE_SET_STATE: {
2142
2143 bool suspend_change =
2144 (s->thread_info.state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2145 (PA_SOURCE_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SOURCE_SUSPENDED);
2146
2147 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2148
2149 if (suspend_change) {
2150 pa_source_output *o;
2151 void *state = NULL;
2152
2153 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2154 if (o->suspend_within_thread)
2155 o->suspend_within_thread(o, s->thread_info.state == PA_SOURCE_SUSPENDED);
2156 }
2157
2158 return 0;
2159 }
2160
2161 case PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY: {
2162
2163 pa_usec_t *usec = userdata;
2164 *usec = pa_source_get_requested_latency_within_thread(s);
2165
2166 /* Yes, that's right: the IO thread will see -1 when no
2167 * explicit requested latency is configured, while the main
2168 * thread will see max_latency */
2169 if (*usec == (pa_usec_t) -1)
2170 *usec = s->thread_info.max_latency;
2171
2172 return 0;
2173 }
2174
2175 case PA_SOURCE_MESSAGE_SET_LATENCY_RANGE: {
2176 pa_usec_t *r = userdata;
2177
2178 pa_source_set_latency_range_within_thread(s, r[0], r[1]);
2179
2180 return 0;
2181 }
2182
2183 case PA_SOURCE_MESSAGE_GET_LATENCY_RANGE: {
2184 pa_usec_t *r = userdata;
2185
2186 r[0] = s->thread_info.min_latency;
2187 r[1] = s->thread_info.max_latency;
2188
2189 return 0;
2190 }
2191
2192 case PA_SOURCE_MESSAGE_GET_FIXED_LATENCY:
2193
2194 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2195 return 0;
2196
2197 case PA_SOURCE_MESSAGE_SET_FIXED_LATENCY:
2198
2199 pa_source_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2200 return 0;
2201
2202 case PA_SOURCE_MESSAGE_GET_MAX_REWIND:
2203
2204 *((size_t*) userdata) = s->thread_info.max_rewind;
2205 return 0;
2206
2207 case PA_SOURCE_MESSAGE_SET_MAX_REWIND:
2208
2209 pa_source_set_max_rewind_within_thread(s, (size_t) offset);
2210 return 0;
2211
2212 case PA_SOURCE_MESSAGE_GET_LATENCY:
2213
2214 if (s->monitor_of) {
2215 *((pa_usec_t*) userdata) = 0;
2216 return 0;
2217 }
2218
2219 /* Implementors need to override this implementation! */
2220 return -1;
2221
2222 case PA_SOURCE_MESSAGE_SET_PORT:
2223
2224 pa_assert(userdata);
2225 if (s->set_port) {
2226 struct source_message_set_port *msg_data = userdata;
2227 msg_data->ret = s->set_port(s, msg_data->port);
2228 }
2229 return 0;
2230
2231 case PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2232 /* This message is sent from the IO thread and handled in the main thread. */
2233 pa_assert_ctl_context();
2234
2235 /* Make sure we're not messing with main thread when no longer linked */
2236 if (!PA_SOURCE_IS_LINKED(s->state))
2237 return 0;
2238
2239 pa_source_get_volume(s, true);
2240 pa_source_get_mute(s, true);
2241 return 0;
2242
2243 case PA_SOURCE_MESSAGE_SET_LATENCY_OFFSET:
2244 s->thread_info.latency_offset = offset;
2245 return 0;
2246
2247 case PA_SOURCE_MESSAGE_MAX:
2248 ;
2249 }
2250
2251 return -1;
2252 }
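
/* A minimal usage sketch (illustrative): a driver typically installs its own
 * process_msg handler, answers the messages it can (most importantly
 * PA_SOURCE_MESSAGE_GET_LATENCY) and forwards everything else to
 * pa_source_process_msg():
 *
 *     static int my_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
 *         pa_source *s = PA_SOURCE(o);
 *
 *         switch (code) {
 *             case PA_SOURCE_MESSAGE_GET_LATENCY:
 *                 *((pa_usec_t*) data) = 0;     // report the real device latency here
 *                 return 0;
 *         }
 *
 *         return pa_source_process_msg(o, code, data, offset, chunk);
 *     }
 */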
2253
2254 /* Called from main thread */
2255 int pa_source_suspend_all(pa_core *c, bool suspend, pa_suspend_cause_t cause) {
2256 pa_source *source;
2257 uint32_t idx;
2258 int ret = 0;
2259
2260 pa_core_assert_ref(c);
2261 pa_assert_ctl_context();
2262 pa_assert(cause != 0);
2263
2264 for (source = PA_SOURCE(pa_idxset_first(c->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(c->sources, &idx))) {
2265 int r;
2266
2267 if (source->monitor_of)
2268 continue;
2269
2270 if ((r = pa_source_suspend(source, suspend, cause)) < 0)
2271 ret = r;
2272 }
2273
2274 return ret;
2275 }
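
/* A minimal usage sketch (illustrative), suspending all hardware sources
 * around a reconfiguration and resuming them afterwards:
 *
 *     pa_source_suspend_all(c, true, PA_SUSPEND_USER);
 *     // reconfigure the devices here
 *     pa_source_suspend_all(c, false, PA_SUSPEND_USER);
 */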
2276
2277 /* Called from IO thread */
2278 void pa_source_detach_within_thread(pa_source *s) {
2279 pa_source_output *o;
2280 void *state = NULL;
2281
2282 pa_source_assert_ref(s);
2283 pa_source_assert_io_context(s);
2284 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2285
2286 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2287 if (o->detach)
2288 o->detach(o);
2289 }
2290
2291 /* Called from IO thread */
2292 void pa_source_attach_within_thread(pa_source *s) {
2293 pa_source_output *o;
2294 void *state = NULL;
2295
2296 pa_source_assert_ref(s);
2297 pa_source_assert_io_context(s);
2298 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2299
2300 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2301 if (o->attach)
2302 o->attach(o);
2303 }
2304
2305 /* Called from IO thread */
2306 pa_usec_t pa_source_get_requested_latency_within_thread(pa_source *s) {
2307 pa_usec_t result = (pa_usec_t) -1;
2308 pa_source_output *o;
2309 void *state = NULL;
2310
2311 pa_source_assert_ref(s);
2312 pa_source_assert_io_context(s);
2313
2314 if (!(s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2315 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2316
2317 if (s->thread_info.requested_latency_valid)
2318 return s->thread_info.requested_latency;
2319
2320 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2321 if (o->thread_info.requested_source_latency != (pa_usec_t) -1 &&
2322 (result == (pa_usec_t) -1 || result > o->thread_info.requested_source_latency))
2323 result = o->thread_info.requested_source_latency;
2324
2325 if (result != (pa_usec_t) -1)
2326 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2327
2328 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2329 /* Only cache this if we are fully set up */
2330 s->thread_info.requested_latency = result;
2331 s->thread_info.requested_latency_valid = true;
2332 }
2333
2334 return result;
2335 }
2336
2337 /* Called from main thread */
2338 pa_usec_t pa_source_get_requested_latency(pa_source *s) {
2339 pa_usec_t usec = 0;
2340
2341 pa_source_assert_ref(s);
2342 pa_assert_ctl_context();
2343 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2344
2345 if (s->state == PA_SOURCE_SUSPENDED)
2346 return 0;
2347
2348 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2349
2350 return usec;
2351 }
2352
2353 /* Called from IO thread */
2354 void pa_source_set_max_rewind_within_thread(pa_source *s, size_t max_rewind) {
2355 pa_source_output *o;
2356 void *state = NULL;
2357
2358 pa_source_assert_ref(s);
2359 pa_source_assert_io_context(s);
2360
2361 if (max_rewind == s->thread_info.max_rewind)
2362 return;
2363
2364 s->thread_info.max_rewind = max_rewind;
2365
2366 if (PA_SOURCE_IS_LINKED(s->thread_info.state))
2367 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2368 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2369 }
2370
2371 /* Called from main thread */
2372 void pa_source_set_max_rewind(pa_source *s, size_t max_rewind) {
2373 pa_source_assert_ref(s);
2374 pa_assert_ctl_context();
2375
2376 if (PA_SOURCE_IS_LINKED(s->state))
2377 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2378 else
2379 pa_source_set_max_rewind_within_thread(s, max_rewind);
2380 }
2381
2382 /* Called from IO thread */
2383 void pa_source_invalidate_requested_latency(pa_source *s, bool dynamic) {
2384 pa_source_output *o;
2385 void *state = NULL;
2386
2387 pa_source_assert_ref(s);
2388 pa_source_assert_io_context(s);
2389
2390 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2391 s->thread_info.requested_latency_valid = false;
2392 else if (dynamic)
2393 return;
2394
2395 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2396
2397 if (s->update_requested_latency)
2398 s->update_requested_latency(s);
2399
2400 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2401 if (o->update_source_requested_latency)
2402 o->update_source_requested_latency(o);
2403 }
2404
2405 if (s->monitor_of)
2406 pa_sink_invalidate_requested_latency(s->monitor_of, dynamic);
2407 }
2408
2409 /* Called from main thread */
2410 void pa_source_set_latency_range(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2411 pa_source_assert_ref(s);
2412 pa_assert_ctl_context();
2413
2414 /* min_latency == 0: no limit
2415 * min_latency anything else: specified limit
2416 *
2417 * Similar for max_latency */
2418
2419 if (min_latency < ABSOLUTE_MIN_LATENCY)
2420 min_latency = ABSOLUTE_MIN_LATENCY;
2421
2422 if (max_latency <= 0 ||
2423 max_latency > ABSOLUTE_MAX_LATENCY)
2424 max_latency = ABSOLUTE_MAX_LATENCY;
2425
2426 pa_assert(min_latency <= max_latency);
2427
2428 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2429 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2430 max_latency == ABSOLUTE_MAX_LATENCY) ||
2431 (s->flags & PA_SOURCE_DYNAMIC_LATENCY));
2432
2433 if (PA_SOURCE_IS_LINKED(s->state)) {
2434 pa_usec_t r[2];
2435
2436 r[0] = min_latency;
2437 r[1] = max_latency;
2438
2439 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2440 } else
2441 pa_source_set_latency_range_within_thread(s, min_latency, max_latency);
2442 }
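
/* A minimal usage sketch (illustrative): a driver that set
 * PA_SOURCE_DYNAMIC_LATENCY would declare the range it can actually honour,
 * for example one millisecond up to two seconds:
 *
 *     pa_source_set_latency_range(s, 1 * PA_USEC_PER_MSEC, 2 * PA_USEC_PER_SEC);
 */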
2443
2444 /* Called from main thread */
2445 void pa_source_get_latency_range(pa_source *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2446 pa_source_assert_ref(s);
2447 pa_assert_ctl_context();
2448 pa_assert(min_latency);
2449 pa_assert(max_latency);
2450
2451 if (PA_SOURCE_IS_LINKED(s->state)) {
2452 pa_usec_t r[2] = { 0, 0 };
2453
2454 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2455
2456 *min_latency = r[0];
2457 *max_latency = r[1];
2458 } else {
2459 *min_latency = s->thread_info.min_latency;
2460 *max_latency = s->thread_info.max_latency;
2461 }
2462 }
2463
2464 /* Called from IO thread, and from main thread before pa_source_put() is called */
2465 void pa_source_set_latency_range_within_thread(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2466 pa_source_assert_ref(s);
2467 pa_source_assert_io_context(s);
2468
2469 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2470 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2471 pa_assert(min_latency <= max_latency);
2472
2473 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2474 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2475 max_latency == ABSOLUTE_MAX_LATENCY) ||
2476 (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ||
2477 s->monitor_of);
2478
2479 if (s->thread_info.min_latency == min_latency &&
2480 s->thread_info.max_latency == max_latency)
2481 return;
2482
2483 s->thread_info.min_latency = min_latency;
2484 s->thread_info.max_latency = max_latency;
2485
2486 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2487 pa_source_output *o;
2488 void *state = NULL;
2489
2490 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2491 if (o->update_source_latency_range)
2492 o->update_source_latency_range(o);
2493 }
2494
2495 pa_source_invalidate_requested_latency(s, false);
2496 }
2497
2498 /* Called from main thread, before the source is put */
2499 void pa_source_set_fixed_latency(pa_source *s, pa_usec_t latency) {
2500 pa_source_assert_ref(s);
2501 pa_assert_ctl_context();
2502
2503 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2504 pa_assert(latency == 0);
2505 return;
2506 }
2507
2508 if (latency < ABSOLUTE_MIN_LATENCY)
2509 latency = ABSOLUTE_MIN_LATENCY;
2510
2511 if (latency > ABSOLUTE_MAX_LATENCY)
2512 latency = ABSOLUTE_MAX_LATENCY;
2513
2514 if (PA_SOURCE_IS_LINKED(s->state))
2515 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2516 else
2517 s->thread_info.fixed_latency = latency;
2518 }
2519
2520 /* Called from main thread */
2521 pa_usec_t pa_source_get_fixed_latency(pa_source *s) {
2522 pa_usec_t latency;
2523
2524 pa_source_assert_ref(s);
2525 pa_assert_ctl_context();
2526
2527 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY)
2528 return 0;
2529
2530 if (PA_SOURCE_IS_LINKED(s->state))
2531 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2532 else
2533 latency = s->thread_info.fixed_latency;
2534
2535 return latency;
2536 }
2537
2538 /* Called from IO thread */
2539 void pa_source_set_fixed_latency_within_thread(pa_source *s, pa_usec_t latency) {
2540 pa_source_assert_ref(s);
2541 pa_source_assert_io_context(s);
2542
2543 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2544 pa_assert(latency == 0);
2545 s->thread_info.fixed_latency = 0;
2546
2547 return;
2548 }
2549
2550 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2551 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2552
2553 if (s->thread_info.fixed_latency == latency)
2554 return;
2555
2556 s->thread_info.fixed_latency = latency;
2557
2558 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2559 pa_source_output *o;
2560 void *state = NULL;
2561
2562 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2563 if (o->update_source_fixed_latency)
2564 o->update_source_fixed_latency(o);
2565 }
2566
2567 pa_source_invalidate_requested_latency(s, false);
2568 }
2569
2570 /* Called from main thread */
2571 void pa_source_set_latency_offset(pa_source *s, int64_t offset) {
2572 pa_source_assert_ref(s);
2573
2574 s->latency_offset = offset;
2575
2576 if (PA_SOURCE_IS_LINKED(s->state))
2577 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_OFFSET, NULL, offset, NULL) == 0);
2578 else
2579 s->thread_info.latency_offset = offset;
2580 }
2581
2582 /* Called from main thread */
2583 size_t pa_source_get_max_rewind(pa_source *s) {
2584 size_t r;
2585 pa_assert_ctl_context();
2586 pa_source_assert_ref(s);
2587
2588 if (!PA_SOURCE_IS_LINKED(s->state))
2589 return s->thread_info.max_rewind;
2590
2591 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2592
2593 return r;
2594 }
2595
2596 /* Called from main context */
2597 int pa_source_set_port(pa_source *s, const char *name, bool save) {
2598 pa_device_port *port;
2599 int ret;
2600
2601 pa_source_assert_ref(s);
2602 pa_assert_ctl_context();
2603
2604 if (!s->set_port) {
2605 pa_log_debug("set_port() operation not implemented for source %u \"%s\"", s->index, s->name);
2606 return -PA_ERR_NOTIMPLEMENTED;
2607 }
2608
2609 if (!name)
2610 return -PA_ERR_NOENTITY;
2611
2612 if (!(port = pa_hashmap_get(s->ports, name)))
2613 return -PA_ERR_NOENTITY;
2614
2615 if (s->active_port == port) {
2616 s->save_port = s->save_port || save;
2617 return 0;
2618 }
2619
2620 if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
2621 struct source_message_set_port msg = { .port = port, .ret = 0 };
2622 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
2623 ret = msg.ret;
2624 }
2625 else
2626 ret = s->set_port(s, port);
2627
2628 if (ret < 0)
2629 return -PA_ERR_NOENTITY;
2630
2631 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2632
2633 pa_log_info("Changed port of source %u \"%s\" to %s", s->index, s->name, port->name);
2634
2635 s->active_port = port;
2636 s->save_port = save;
2637
2638 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_CHANGED], s);
2639
2640 return 0;
2641 }
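
/* A minimal usage sketch (illustrative; the port name is hypothetical and
 * device dependent):
 *
 *     if (pa_source_set_port(s, "analog-input-microphone", true) < 0)
 *         pa_log_warn("Failed to switch to the microphone port");
 */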
2642
2643 PA_STATIC_FLIST_DECLARE(pa_source_volume_change, 0, pa_xfree);
2644
2645 /* Called from the IO thread. */
2646 static pa_source_volume_change *pa_source_volume_change_new(pa_source *s) {
2647 pa_source_volume_change *c;
2648 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_source_volume_change))))
2649 c = pa_xnew(pa_source_volume_change, 1);
2650
2651 PA_LLIST_INIT(pa_source_volume_change, c);
2652 c->at = 0;
2653 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
2654 return c;
2655 }
2656
2657 /* Called from the IO thread. */
2658 static void pa_source_volume_change_free(pa_source_volume_change *c) {
2659 pa_assert(c);
2660 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_source_volume_change), c) < 0)
2661 pa_xfree(c);
2662 }
2663
2664 /* Called from the IO thread. */
2665 void pa_source_volume_change_push(pa_source *s) {
2666 pa_source_volume_change *c = NULL;
2667 pa_source_volume_change *nc = NULL;
2668 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
2669
2670 const char *direction = NULL;
2671
2672 pa_assert(s);
2673 nc = pa_source_volume_change_new(s);
2674
2675 /* NOTE: There are already more different volumes in pa_source than I can remember.
2676 * Adding one more volume for HW would rid us of this, but I am trying
2677 * to survive with the ones we already have. */
2678 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
2679
2680 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
2681 pa_log_debug("Volume not changing");
2682 pa_source_volume_change_free(nc);
2683 return;
2684 }
2685
2686 nc->at = pa_source_get_latency_within_thread(s);
2687 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
2688
2689 if (s->thread_info.volume_changes_tail) {
2690 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
2691 /* If the volume is going up, let's do it a bit late. If it is
2692 * going down, let's do it a bit early. */
2693 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
2694 if (nc->at + safety_margin > c->at) {
2695 nc->at += safety_margin;
2696 direction = "up";
2697 break;
2698 }
2699 }
2700 else if (nc->at - safety_margin > c->at) {
2701 nc->at -= safety_margin;
2702 direction = "down";
2703 break;
2704 }
2705 }
2706 }
2707
2708 if (c == NULL) {
2709 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
2710 nc->at += safety_margin;
2711 direction = "up";
2712 } else {
2713 nc->at -= safety_margin;
2714 direction = "down";
2715 }
2716 PA_LLIST_PREPEND(pa_source_volume_change, s->thread_info.volume_changes, nc);
2717 }
2718 else {
2719 PA_LLIST_INSERT_AFTER(pa_source_volume_change, s->thread_info.volume_changes, c, nc);
2720 }
2721
2722 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
2723
2724 /* We can ignore volume events that came earlier but should happen later than this. */
2725 PA_LLIST_FOREACH(c, nc->next) {
2726 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
2727 pa_source_volume_change_free(c);
2728 }
2729 nc->next = NULL;
2730 s->thread_info.volume_changes_tail = nc;
2731 }
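
/* Worked example (illustrative): with a 2000 usec safety margin, a change
 * that raises the volume and would nominally apply at t = 10000 usec is
 * deferred to t = 12000 usec, while one that lowers the volume is advanced
 * to t = 8000 usec, so increases err on the late side and decreases on the
 * early side relative to the captured audio. */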
2732
2733 /* Called from the IO thread. */
2734 static void pa_source_volume_change_flush(pa_source *s) {
2735 pa_source_volume_change *c = s->thread_info.volume_changes;
2736 pa_assert(s);
2737 s->thread_info.volume_changes = NULL;
2738 s->thread_info.volume_changes_tail = NULL;
2739 while (c) {
2740 pa_source_volume_change *next = c->next;
2741 pa_source_volume_change_free(c);
2742 c = next;
2743 }
2744 }
2745
2746 /* Called from the IO thread. */
2747 bool pa_source_volume_change_apply(pa_source *s, pa_usec_t *usec_to_next) {
2748 pa_usec_t now;
2749 bool ret = false;
2750
2751 pa_assert(s);
2752
2753 if (!s->thread_info.volume_changes || !PA_SOURCE_IS_LINKED(s->state)) {
2754 if (usec_to_next)
2755 *usec_to_next = 0;
2756 return ret;
2757 }
2758
2759 pa_assert(s->write_volume);
2760
2761 now = pa_rtclock_now();
2762
2763 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
2764 pa_source_volume_change *c = s->thread_info.volume_changes;
2765 PA_LLIST_REMOVE(pa_source_volume_change, s->thread_info.volume_changes, c);
2766 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
2767 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
2768 ret = true;
2769 s->thread_info.current_hw_volume = c->hw_volume;
2770 pa_source_volume_change_free(c);
2771 }
2772
2773 if (ret)
2774 s->write_volume(s);
2775
2776 if (s->thread_info.volume_changes) {
2777 if (usec_to_next)
2778 *usec_to_next = s->thread_info.volume_changes->at - now;
2779 if (pa_log_ratelimit(PA_LOG_DEBUG))
2780 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
2781 }
2782 else {
2783 if (usec_to_next)
2784 *usec_to_next = 0;
2785 s->thread_info.volume_changes_tail = NULL;
2786 }
2787 return ret;
2788 }
2789
2790 /* Called from the main thread */
2791 /* Gets the list of formats supported by the source. The members and idxset must
2792 * be freed by the caller. */
2793 pa_idxset* pa_source_get_formats(pa_source *s) {
2794 pa_idxset *ret;
2795
2796 pa_assert(s);
2797
2798 if (s->get_formats) {
2799 /* Source supports format query, all is good */
2800 ret = s->get_formats(s);
2801 } else {
2802 /* Source doesn't support format query, so assume it does PCM */
2803 pa_format_info *f = pa_format_info_new();
2804 f->encoding = PA_ENCODING_PCM;
2805
2806 ret = pa_idxset_new(NULL, NULL);
2807 pa_idxset_put(ret, f, NULL);
2808 }
2809
2810 return ret;
2811 }
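
/* A minimal usage sketch (illustrative), enumerating the supported formats
 * and freeing them as required above:
 *
 *     pa_format_info *f;
 *     uint32_t idx;
 *     pa_idxset *formats = pa_source_get_formats(s);
 *
 *     PA_IDXSET_FOREACH(f, formats, idx)
 *         pa_log_debug("Supported encoding: %s", pa_encoding_to_string(f->encoding));
 *
 *     pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
 */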
2812
2813 /* Called from the main thread */
2814 /* Checks if the source can accept this format */
2815 bool pa_source_check_format(pa_source *s, pa_format_info *f) {
2816 pa_idxset *formats = NULL;
2817 bool ret = false;
2818
2819 pa_assert(s);
2820 pa_assert(f);
2821
2822 formats = pa_source_get_formats(s);
2823
2824 if (formats) {
2825 pa_format_info *finfo_device;
2826 uint32_t i;
2827
2828 PA_IDXSET_FOREACH(finfo_device, formats, i) {
2829 if (pa_format_info_is_compatible(finfo_device, f)) {
2830 ret = true;
2831 break;
2832 }
2833 }
2834
2835 pa_idxset_free(formats, (pa_free_cb_t) pa_format_info_free);
2836 }
2837
2838 return ret;
2839 }
2840
2841 /* Called from the main thread */
2842 /* Calculates the intersection between formats supported by the source and
2843 * in_formats, and returns these, in the order of the source's formats. */
2844 pa_idxset* pa_source_check_formats(pa_source *s, pa_idxset *in_formats) {
2845 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *source_formats = NULL;
2846 pa_format_info *f_source, *f_in;
2847 uint32_t i, j;
2848
2849 pa_assert(s);
2850
2851 if (!in_formats || pa_idxset_isempty(in_formats))
2852 goto done;
2853
2854 source_formats = pa_source_get_formats(s);
2855
2856 PA_IDXSET_FOREACH(f_source, source_formats, i) {
2857 PA_IDXSET_FOREACH(f_in, in_formats, j) {
2858 if (pa_format_info_is_compatible(f_source, f_in))
2859 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
2860 }
2861 }
2862
2863 done:
2864 if (source_formats)
2865 pa_idxset_free(source_formats, (pa_free_cb_t) pa_format_info_free);
2866
2867 return out_formats;
2868 }
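
/* A minimal usage sketch (illustrative; `requested_formats` stands for a
 * format list supplied by a client):
 *
 *     pa_idxset *compatible = pa_source_check_formats(s, requested_formats);
 *
 *     if (pa_idxset_isempty(compatible))
 *         pa_log_info("None of the requested formats are supported");
 *
 *     pa_idxset_free(compatible, (pa_free_cb_t) pa_format_info_free);
 */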
2869
2870 /* Called from the main thread. */
2871 void pa_source_set_reference_volume_direct(pa_source *s, const pa_cvolume *volume) {
2872 pa_cvolume old_volume;
2873 char old_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
2874 char new_volume_str[PA_CVOLUME_SNPRINT_VERBOSE_MAX];
2875
2876 pa_assert(s);
2877 pa_assert(volume);
2878
2879 old_volume = s->reference_volume;
2880
2881 if (pa_cvolume_equal(volume, &old_volume))
2882 return;
2883
2884 s->reference_volume = *volume;
2885 pa_log_debug("The reference volume of source %s changed from %s to %s.", s->name,
2886 pa_cvolume_snprint_verbose(old_volume_str, sizeof(old_volume_str), &old_volume, &s->channel_map,
2887 s->flags & PA_SOURCE_DECIBEL_VOLUME),
2888 pa_cvolume_snprint_verbose(new_volume_str, sizeof(new_volume_str), volume, &s->channel_map,
2889 s->flags & PA_SOURCE_DECIBEL_VOLUME));
2890
2891 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2892 }