src/pulsecore/source.c (pulseaudio)
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29
30 #include <pulse/format.h>
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
37
38 #include <pulsecore/core-util.h>
39 #include <pulsecore/source-output.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-subscribe.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/sample-util.h>
44 #include <pulsecore/flist.h>
45
46 #include "source.h"
47
48 #define ABSOLUTE_MIN_LATENCY (500)
49 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
50 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
51
52 PA_DEFINE_PUBLIC_CLASS(pa_source, pa_msgobject);
53
54 struct pa_source_volume_change {
55 pa_usec_t at;
56 pa_cvolume hw_volume;
57
58 PA_LLIST_FIELDS(pa_source_volume_change);
59 };
60
61 struct source_message_set_port {
62 pa_device_port *port;
63 int ret;
64 };
65
66 static void source_free(pa_object *o);
67
68 static void pa_source_volume_change_push(pa_source *s);
69 static void pa_source_volume_change_flush(pa_source *s);
70
71 pa_source_new_data* pa_source_new_data_init(pa_source_new_data *data) {
72 pa_assert(data);
73
74 pa_zero(*data);
75 data->proplist = pa_proplist_new();
76
77 return data;
78 }
79
80 void pa_source_new_data_set_name(pa_source_new_data *data, const char *name) {
81 pa_assert(data);
82
83 pa_xfree(data->name);
84 data->name = pa_xstrdup(name);
85 }
86
87 void pa_source_new_data_set_sample_spec(pa_source_new_data *data, const pa_sample_spec *spec) {
88 pa_assert(data);
89
90 if ((data->sample_spec_is_set = !!spec))
91 data->sample_spec = *spec;
92 }
93
94 void pa_source_new_data_set_channel_map(pa_source_new_data *data, const pa_channel_map *map) {
95 pa_assert(data);
96
97 if ((data->channel_map_is_set = !!map))
98 data->channel_map = *map;
99 }
100
101 void pa_source_new_data_set_volume(pa_source_new_data *data, const pa_cvolume *volume) {
102 pa_assert(data);
103
104 if ((data->volume_is_set = !!volume))
105 data->volume = *volume;
106 }
107
108 void pa_source_new_data_set_muted(pa_source_new_data *data, pa_bool_t mute) {
109 pa_assert(data);
110
111 data->muted_is_set = TRUE;
112 data->muted = !!mute;
113 }
114
115 void pa_source_new_data_set_port(pa_source_new_data *data, const char *port) {
116 pa_assert(data);
117
118 pa_xfree(data->active_port);
119 data->active_port = pa_xstrdup(port);
120 }
121
122 void pa_source_new_data_done(pa_source_new_data *data) {
123 pa_assert(data);
124
125 pa_proplist_free(data->proplist);
126
127 if (data->ports) {
128 pa_device_port *p;
129
130 while ((p = pa_hashmap_steal_first(data->ports)))
131 pa_device_port_free(p);
132
133 pa_hashmap_free(data->ports, NULL, NULL);
134 }
135
136 pa_xfree(data->name);
137 pa_xfree(data->active_port);
138 }
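/*
 * Illustrative sketch (not from this file): how a driver module would
 * typically use the pa_source_new_data helpers above. The module name,
 * source name and the fixed S16NE stereo spec are assumptions, and error
 * handling is trimmed.
 */
#if 0
static pa_source* example_create_source(pa_module *m) {
    pa_source_new_data data;
    pa_sample_spec ss;
    pa_channel_map map;
    pa_source *s;

    ss.format = PA_SAMPLE_S16NE;
    ss.rate = 44100;
    ss.channels = 2;
    pa_channel_map_init_stereo(&map);

    pa_source_new_data_init(&data);
    data.driver = __FILE__;
    data.module = m;
    pa_source_new_data_set_name(&data, "example_source");
    pa_source_new_data_set_sample_spec(&data, &ss);
    pa_source_new_data_set_channel_map(&data, &map);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Example source");

    s = pa_source_new(m->core, &data, PA_SOURCE_LATENCY);

    /* _done() must be called whether or not pa_source_new() succeeded */
    pa_source_new_data_done(&data);

    return s;
}
#endif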
139
140 /* Called from main context */
141 static void reset_callbacks(pa_source *s) {
142 pa_assert(s);
143
144 s->set_state = NULL;
145 s->get_volume = NULL;
146 s->set_volume = NULL;
147 s->write_volume = NULL;
148 s->get_mute = NULL;
149 s->set_mute = NULL;
150 s->update_requested_latency = NULL;
151 s->set_port = NULL;
152 s->get_formats = NULL;
153 }
154
155 /* Called from main context */
156 pa_source* pa_source_new(
157 pa_core *core,
158 pa_source_new_data *data,
159 pa_source_flags_t flags) {
160
161 pa_source *s;
162 const char *name;
163 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
164 char *pt;
165
166 pa_assert(core);
167 pa_assert(data);
168 pa_assert(data->name);
169 pa_assert_ctl_context();
170
171 s = pa_msgobject_new(pa_source);
172
173 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SOURCE, s, data->namereg_fail))) {
174 pa_log_debug("Failed to register name %s.", data->name);
175 pa_xfree(s);
176 return NULL;
177 }
178
179 pa_source_new_data_set_name(data, name);
180
181 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_NEW], data) < 0) {
182 pa_xfree(s);
183 pa_namereg_unregister(core, name);
184 return NULL;
185 }
186
187 /* FIXME, need to free s here on failure */
188
189 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
190 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
191
192 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
193
194 if (!data->channel_map_is_set)
195 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
196
197 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
198 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
199
200 /* FIXME: There should probably be a general function for checking whether
201 * the source volume is allowed to be set, like there is for source outputs. */
202 pa_assert(!data->volume_is_set || !(flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
203
204 if (!data->volume_is_set) {
205 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
206 data->save_volume = FALSE;
207 }
208
209 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
210 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
211
212 if (!data->muted_is_set)
213 data->muted = FALSE;
214
215 if (data->card)
216 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
217
218 pa_device_init_description(data->proplist);
219 pa_device_init_icon(data->proplist, FALSE);
220 pa_device_init_intended_roles(data->proplist);
221
222 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_FIXATE], data) < 0) {
223 pa_xfree(s);
224 pa_namereg_unregister(core, name);
225 return NULL;
226 }
227
228 s->parent.parent.free = source_free;
229 s->parent.process_msg = pa_source_process_msg;
230
231 s->core = core;
232 s->state = PA_SOURCE_INIT;
233 s->flags = flags;
234 s->priority = 0;
235 s->suspend_cause = 0;
236 s->name = pa_xstrdup(name);
237 s->proplist = pa_proplist_copy(data->proplist);
238 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
239 s->module = data->module;
240 s->card = data->card;
241
242 s->priority = pa_device_init_priority(s->proplist);
243
244 s->sample_spec = data->sample_spec;
245 s->channel_map = data->channel_map;
246
247 s->outputs = pa_idxset_new(NULL, NULL);
248 s->n_corked = 0;
249 s->monitor_of = NULL;
250 s->output_from_master = NULL;
251
252 s->reference_volume = s->real_volume = data->volume;
253 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
254 s->base_volume = PA_VOLUME_NORM;
255 s->n_volume_steps = PA_VOLUME_NORM+1;
256 s->muted = data->muted;
257 s->refresh_volume = s->refresh_muted = FALSE;
258
259 reset_callbacks(s);
260 s->userdata = NULL;
261
262 s->asyncmsgq = NULL;
263
264 /* As a minor optimization we just steal the list instead of
265 * copying it here */
266 s->ports = data->ports;
267 data->ports = NULL;
268
269 s->active_port = NULL;
270 s->save_port = FALSE;
271
272 if (data->active_port && s->ports)
273 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
274 s->save_port = data->save_port;
275
276 if (!s->active_port && s->ports) {
277 void *state;
278 pa_device_port *p;
279
280 PA_HASHMAP_FOREACH(p, s->ports, state)
281 if (!s->active_port || p->priority > s->active_port->priority)
282 s->active_port = p;
283 }
284
285 s->save_volume = data->save_volume;
286 s->save_muted = data->save_muted;
287
288 pa_silence_memchunk_get(
289 &core->silence_cache,
290 core->mempool,
291 &s->silence,
292 &s->sample_spec,
293 0);
294
295 s->thread_info.rtpoll = NULL;
296 s->thread_info.outputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
297 s->thread_info.soft_volume = s->soft_volume;
298 s->thread_info.soft_muted = s->muted;
299 s->thread_info.state = s->state;
300 s->thread_info.max_rewind = 0;
301 s->thread_info.requested_latency_valid = FALSE;
302 s->thread_info.requested_latency = 0;
303 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
304 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
305 s->thread_info.fixed_latency = flags & PA_SOURCE_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
306
307 PA_LLIST_HEAD_INIT(pa_source_volume_change, s->thread_info.volume_changes);
308 s->thread_info.volume_changes_tail = NULL;
309 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
310 s->thread_info.volume_change_safety_margin = core->sync_volume_safety_margin_usec;
311 s->thread_info.volume_change_extra_delay = core->sync_volume_extra_delay_usec;
312
313 /* FIXME: This should probably be moved to pa_source_put() */
314 pa_assert_se(pa_idxset_put(core->sources, s, &s->index) >= 0);
315
316 if (s->card)
317 pa_assert_se(pa_idxset_put(s->card->sources, s, NULL) >= 0);
318
319 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
320 pa_log_info("Created source %u \"%s\" with sample spec %s and channel map %s\n %s",
321 s->index,
322 s->name,
323 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
324 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
325 pt);
326 pa_xfree(pt);
327
328 return s;
329 }
330
331 /* Called from main context */
332 static int source_set_state(pa_source *s, pa_source_state_t state) {
333 int ret;
334 pa_bool_t suspend_change;
335 pa_source_state_t original_state;
336
337 pa_assert(s);
338 pa_assert_ctl_context();
339
340 if (s->state == state)
341 return 0;
342
343 original_state = s->state;
344
345 suspend_change =
346 (original_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(state)) ||
347 (PA_SOURCE_IS_OPENED(original_state) && state == PA_SOURCE_SUSPENDED);
348
349 if (s->set_state)
350 if ((ret = s->set_state(s, state)) < 0)
351 return ret;
352
353 if (s->asyncmsgq)
354 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
355
356 if (s->set_state)
357 s->set_state(s, original_state);
358
359 return ret;
360 }
361
362 s->state = state;
363
364 if (state != PA_SOURCE_UNLINKED) { /* if we enter UNLINKED state pa_source_unlink() will fire the appropriate events */
365 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_STATE_CHANGED], s);
366 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
367 }
368
369 if (suspend_change) {
370 pa_source_output *o;
371 uint32_t idx;
372
373 /* We're suspending or resuming, tell everyone about it */
374
375 PA_IDXSET_FOREACH(o, s->outputs, idx)
376 if (s->state == PA_SOURCE_SUSPENDED &&
377 (o->flags & PA_SOURCE_OUTPUT_KILL_ON_SUSPEND))
378 pa_source_output_kill(o);
379 else if (o->suspend)
380 o->suspend(o, state == PA_SOURCE_SUSPENDED);
381 }
382
383 return 0;
384 }
385
386 void pa_source_set_get_volume_callback(pa_source *s, pa_source_cb_t cb) {
387 pa_assert(s);
388
389 s->get_volume = cb;
390 }
391
392 void pa_source_set_set_volume_callback(pa_source *s, pa_source_cb_t cb) {
393 pa_source_flags_t flags;
394
395 pa_assert(s);
396 pa_assert(!s->write_volume || cb);
397
398 s->set_volume = cb;
399
400 /* Save the current flags so we can tell if they've changed */
401 flags = s->flags;
402
403 if (cb) {
404 /* The source implementor is responsible for setting decibel volume support */
405 s->flags |= PA_SOURCE_HW_VOLUME_CTRL;
406 } else {
407 s->flags &= ~PA_SOURCE_HW_VOLUME_CTRL;
408 /* See note below in pa_source_put() about volume sharing and decibel volumes */
409 pa_source_enable_decibel_volume(s, !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
410 }
411
412 /* If the flags have changed after init, let any clients know via a change event */
413 if (s->state != PA_SOURCE_INIT && flags != s->flags)
414 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
415 }
416
417 void pa_source_set_write_volume_callback(pa_source *s, pa_source_cb_t cb) {
418 pa_source_flags_t flags;
419
420 pa_assert(s);
421 pa_assert(!cb || s->set_volume);
422
423 s->write_volume = cb;
424
425 /* Save the current flags so we can tell if they've changed */
426 flags = s->flags;
427
428 if (cb)
429 s->flags |= PA_SOURCE_SYNC_VOLUME;
430 else
431 s->flags &= ~PA_SOURCE_SYNC_VOLUME;
432
433 /* If the flags have changed after init, let any clients know via a change event */
434 if (s->state != PA_SOURCE_INIT && flags != s->flags)
435 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
436 }
437
438 void pa_source_set_get_mute_callback(pa_source *s, pa_source_cb_t cb) {
439 pa_assert(s);
440
441 s->get_mute = cb;
442 }
443
444 void pa_source_set_set_mute_callback(pa_source *s, pa_source_cb_t cb) {
445 pa_source_flags_t flags;
446
447 pa_assert(s);
448
449 s->set_mute = cb;
450
451 /* Save the current flags so we can tell if they've changed */
452 flags = s->flags;
453
454 if (cb)
455 s->flags |= PA_SOURCE_HW_MUTE_CTRL;
456 else
457 s->flags &= ~PA_SOURCE_HW_MUTE_CTRL;
458
459 /* If the flags have changed after init, let any clients know via a change event */
460 if (s->state != PA_SOURCE_INIT && flags != s->flags)
461 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
462 }
463
464 static void enable_flat_volume(pa_source *s, pa_bool_t enable) {
465 pa_source_flags_t flags;
466
467 pa_assert(s);
468
469 /* Always follow the overall user preference here */
470 enable = enable && s->core->flat_volumes;
471
472 /* Save the current flags so we can tell if they've changed */
473 flags = s->flags;
474
475 if (enable)
476 s->flags |= PA_SOURCE_FLAT_VOLUME;
477 else
478 s->flags &= ~PA_SOURCE_FLAT_VOLUME;
479
480 /* If the flags have changed after init, let any clients know via a change event */
481 if (s->state != PA_SOURCE_INIT && flags != s->flags)
482 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
483 }
484
485 void pa_source_enable_decibel_volume(pa_source *s, pa_bool_t enable) {
486 pa_source_flags_t flags;
487
488 pa_assert(s);
489
490 /* Save the current flags so we can tell if they've changed */
491 flags = s->flags;
492
493 if (enable) {
494 s->flags |= PA_SOURCE_DECIBEL_VOLUME;
495 enable_flat_volume(s, TRUE);
496 } else {
497 s->flags &= ~PA_SOURCE_DECIBEL_VOLUME;
498 enable_flat_volume(s, FALSE);
499 }
500
501 /* If the flags have changed after init, let any clients know via a change event */
502 if (s->state != PA_SOURCE_INIT && flags != s->flags)
503 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
504 }
505
506 /* Called from main context */
507 void pa_source_put(pa_source *s) {
508 pa_source_assert_ref(s);
509 pa_assert_ctl_context();
510
511 pa_assert(s->state == PA_SOURCE_INIT);
512 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || s->output_from_master);
513
514 /* The following fields must be initialized properly when calling _put() */
515 pa_assert(s->asyncmsgq);
516 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
517
518 /* Generally, flags should be initialized via pa_source_new(). As a
519 * special exception we allow some volume related flags to be set
520 * between _new() and _put() by the callback setter functions above.
521 *
522 * Thus we implement a couple of safeguards here which ensure the above
523 * setters were used (or at least the implementor made manual changes
524 * in a compatible way).
525 *
526 * Note: All of these flags set here can change over the lifetime
527 * of the source. */
528 pa_assert(!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) || s->set_volume);
529 pa_assert(!(s->flags & PA_SOURCE_SYNC_VOLUME) || s->write_volume);
530 pa_assert(!(s->flags & PA_SOURCE_HW_MUTE_CTRL) || s->set_mute);
531
532 /* XXX: Currently decibel volume is disabled for all sources that use volume
533 * sharing. When the master source supports decibel volume, it would be good
534 * to have the flag also in the filter source, but currently we don't do that
535 * so that the flags of the filter source never change when it's moved from
536 * a master source to another. One solution for this problem would be to
537 * remove user-visible volume altogether from filter sources when volume
538 * sharing is used, but the current approach was easier to implement... */
539 /* We always support decibel volumes in software, otherwise we leave it to
540 * the source implementor to set this flag as needed.
541 *
542 * Note: This flag can also change over the lifetime of the source. */
543 if (!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
544 pa_source_enable_decibel_volume(s, TRUE);
545
546 /* If the source implementor supports dB volumes by itself, we should always
547 * try to enable flat volumes too */
548 if ((s->flags & PA_SOURCE_DECIBEL_VOLUME))
549 enable_flat_volume(s, TRUE);
550
551 if (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) {
552 pa_source *root_source = s->output_from_master->source;
553
554 while (root_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
555 root_source = root_source->output_from_master->source;
556
557 s->reference_volume = root_source->reference_volume;
558 pa_cvolume_remap(&s->reference_volume, &root_source->channel_map, &s->channel_map);
559
560 s->real_volume = root_source->real_volume;
561 pa_cvolume_remap(&s->real_volume, &root_source->channel_map, &s->channel_map);
562 } else
563 /* We assume that if the source implementor changed the default
564 * volume, the change was made in real_volume, because that is the
565 * usual place for implementor-side changes. */
566 s->reference_volume = s->real_volume;
567
568 s->thread_info.soft_volume = s->soft_volume;
569 s->thread_info.soft_muted = s->muted;
570 pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
571
572 pa_assert((s->flags & PA_SOURCE_HW_VOLUME_CTRL)
573 || (s->base_volume == PA_VOLUME_NORM
574 && ((s->flags & PA_SOURCE_DECIBEL_VOLUME || (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)))));
575 pa_assert(!(s->flags & PA_SOURCE_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
576 pa_assert(!(s->flags & PA_SOURCE_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
577
578 pa_assert_se(source_set_state(s, PA_SOURCE_IDLE) == 0);
579
580 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_NEW, s->index);
581 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PUT], s);
582 }
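/*
 * Illustrative sketch (not from this file): the usual sequence between
 * pa_source_new() and pa_source_put() in a driver module. "struct userdata"
 * and the example_*_cb callbacks are hypothetical placeholders.
 */
#if 0
static void example_finish_source_setup(pa_source *s, struct userdata *u) {
    s->userdata = u;

    pa_source_set_asyncmsgq(s, u->thread_mq.inq);
    pa_source_set_rtpoll(s, u->rtpoll);

    /* Setting these between _new() and _put() toggles the corresponding
     * PA_SOURCE_HW_VOLUME_CTRL / PA_SOURCE_HW_MUTE_CTRL flags, see the
     * callback setters above. */
    pa_source_set_get_volume_callback(s, example_get_volume_cb);
    pa_source_set_set_volume_callback(s, example_set_volume_cb);
    pa_source_set_get_mute_callback(s, example_get_mute_cb);
    pa_source_set_set_mute_callback(s, example_set_mute_cb);

    pa_source_put(s);   /* INIT -> IDLE, fires PA_CORE_HOOK_SOURCE_PUT */
}
#endif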
583
584 /* Called from main context */
585 void pa_source_unlink(pa_source *s) {
586 pa_bool_t linked;
587 pa_source_output *o, *j = NULL;
588
589 pa_assert(s);
590 pa_assert_ctl_context();
591
592 /* See pa_sink_unlink() for a couple of comments how this function
593 * works. */
594
595 linked = PA_SOURCE_IS_LINKED(s->state);
596
597 if (linked)
598 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], s);
599
600 if (s->state != PA_SOURCE_UNLINKED)
601 pa_namereg_unregister(s->core, s->name);
602 pa_idxset_remove_by_data(s->core->sources, s, NULL);
603
604 if (s->card)
605 pa_idxset_remove_by_data(s->card->sources, s, NULL);
606
607 while ((o = pa_idxset_first(s->outputs, NULL))) {
608 pa_assert(o != j);
609 pa_source_output_kill(o);
610 j = o;
611 }
612
613 if (linked)
614 source_set_state(s, PA_SOURCE_UNLINKED);
615 else
616 s->state = PA_SOURCE_UNLINKED;
617
618 reset_callbacks(s);
619
620 if (linked) {
621 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
622 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK_POST], s);
623 }
624 }
625
626 /* Called from main context */
627 static void source_free(pa_object *o) {
628 pa_source_output *so;
629 pa_source *s = PA_SOURCE(o);
630
631 pa_assert(s);
632 pa_assert_ctl_context();
633 pa_assert(pa_source_refcnt(s) == 0);
634
635 if (PA_SOURCE_IS_LINKED(s->state))
636 pa_source_unlink(s);
637
638 pa_log_info("Freeing source %u \"%s\"", s->index, s->name);
639
640 pa_idxset_free(s->outputs, NULL, NULL);
641
642 while ((so = pa_hashmap_steal_first(s->thread_info.outputs)))
643 pa_source_output_unref(so);
644
645 pa_hashmap_free(s->thread_info.outputs, NULL, NULL);
646
647 if (s->silence.memblock)
648 pa_memblock_unref(s->silence.memblock);
649
650 pa_xfree(s->name);
651 pa_xfree(s->driver);
652
653 if (s->proplist)
654 pa_proplist_free(s->proplist);
655
656 if (s->ports) {
657 pa_device_port *p;
658
659 while ((p = pa_hashmap_steal_first(s->ports)))
660 pa_device_port_free(p);
661
662 pa_hashmap_free(s->ports, NULL, NULL);
663 }
664
665 pa_xfree(s);
666 }
667
668 /* Called from main context, and not while the IO thread is active, please */
669 void pa_source_set_asyncmsgq(pa_source *s, pa_asyncmsgq *q) {
670 pa_source_assert_ref(s);
671 pa_assert_ctl_context();
672
673 s->asyncmsgq = q;
674 }
675
676 /* Called from main context, and not while the IO thread is active, please */
677 void pa_source_update_flags(pa_source *s, pa_source_flags_t mask, pa_source_flags_t value) {
678 pa_source_assert_ref(s);
679 pa_assert_ctl_context();
680
681 if (mask == 0)
682 return;
683
684 /* For now, allow only a minimal set of flags to be changed. */
685 pa_assert((mask & ~(PA_SOURCE_DYNAMIC_LATENCY|PA_SOURCE_LATENCY)) == 0);
686
687 s->flags = (s->flags & ~mask) | (value & mask);
688 }
689
690 /* Called from IO context, or before _put() from main context */
691 void pa_source_set_rtpoll(pa_source *s, pa_rtpoll *p) {
692 pa_source_assert_ref(s);
693 pa_source_assert_io_context(s);
694
695 s->thread_info.rtpoll = p;
696 }
697
698 /* Called from main context */
699 int pa_source_update_status(pa_source*s) {
700 pa_source_assert_ref(s);
701 pa_assert_ctl_context();
702 pa_assert(PA_SOURCE_IS_LINKED(s->state));
703
704 if (s->state == PA_SOURCE_SUSPENDED)
705 return 0;
706
707 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
708 }
709
710 /* Called from main context */
711 int pa_source_suspend(pa_source *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
712 pa_source_assert_ref(s);
713 pa_assert_ctl_context();
714 pa_assert(PA_SOURCE_IS_LINKED(s->state));
715 pa_assert(cause != 0);
716
717 if (s->monitor_of && cause != PA_SUSPEND_PASSTHROUGH)
718 return -PA_ERR_NOTSUPPORTED;
719
720 if (suspend)
721 s->suspend_cause |= cause;
722 else
723 s->suspend_cause &= ~cause;
724
725 if ((pa_source_get_state(s) == PA_SOURCE_SUSPENDED) == !!s->suspend_cause)
726 return 0;
727
728 pa_log_debug("Suspend cause of source %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
729
730 if (s->suspend_cause)
731 return source_set_state(s, PA_SOURCE_SUSPENDED);
732 else
733 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
734 }
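/*
 * Usage sketch for pa_source_suspend(): suspend causes accumulate as a
 * bitmask, so resume calls must clear the same cause that was set
 * (illustrative only).
 */
#if 0
    pa_source_suspend(s, TRUE, PA_SUSPEND_USER);   /* adds the cause; suspends if none was set */
    /* ... */
    pa_source_suspend(s, FALSE, PA_SUSPEND_USER);  /* clears it; resumes only once no cause remains */
#endif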
735
736 /* Called from main context */
737 int pa_source_sync_suspend(pa_source *s) {
738 pa_sink_state_t state;
739
740 pa_source_assert_ref(s);
741 pa_assert_ctl_context();
742 pa_assert(PA_SOURCE_IS_LINKED(s->state));
743 pa_assert(s->monitor_of);
744
745 state = pa_sink_get_state(s->monitor_of);
746
747 if (state == PA_SINK_SUSPENDED)
748 return source_set_state(s, PA_SOURCE_SUSPENDED);
749
750 pa_assert(PA_SINK_IS_OPENED(state));
751
752 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
753 }
754
755 /* Called from main context */
756 pa_queue *pa_source_move_all_start(pa_source *s, pa_queue *q) {
757 pa_source_output *o, *n;
758 uint32_t idx;
759
760 pa_source_assert_ref(s);
761 pa_assert_ctl_context();
762 pa_assert(PA_SOURCE_IS_LINKED(s->state));
763
764 if (!q)
765 q = pa_queue_new();
766
767 for (o = PA_SOURCE_OUTPUT(pa_idxset_first(s->outputs, &idx)); o; o = n) {
768 n = PA_SOURCE_OUTPUT(pa_idxset_next(s->outputs, &idx));
769
770 pa_source_output_ref(o);
771
772 if (pa_source_output_start_move(o) >= 0)
773 pa_queue_push(q, o);
774 else
775 pa_source_output_unref(o);
776 }
777
778 return q;
779 }
780
781 /* Called from main context */
782 void pa_source_move_all_finish(pa_source *s, pa_queue *q, pa_bool_t save) {
783 pa_source_output *o;
784
785 pa_source_assert_ref(s);
786 pa_assert_ctl_context();
787 pa_assert(PA_SOURCE_IS_LINKED(s->state));
788 pa_assert(q);
789
790 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
791 if (pa_source_output_finish_move(o, s, save) < 0)
792 pa_source_output_fail_move(o);
793
794 pa_source_output_unref(o);
795 }
796
797 pa_queue_free(q, NULL, NULL);
798 }
799
800 /* Called from main context */
801 void pa_source_move_all_fail(pa_queue *q) {
802 pa_source_output *o;
803
804 pa_assert_ctl_context();
805 pa_assert(q);
806
807 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
808 pa_source_output_fail_move(o);
809 pa_source_output_unref(o);
810 }
811
812 pa_queue_free(q, NULL, NULL);
813 }
814
815 /* Called from IO thread context */
816 void pa_source_process_rewind(pa_source *s, size_t nbytes) {
817 pa_source_output *o;
818 void *state = NULL;
819
820 pa_source_assert_ref(s);
821 pa_source_assert_io_context(s);
822 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
823
824 if (nbytes <= 0)
825 return;
826
827 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
828 return;
829
830 pa_log_debug("Processing rewind...");
831
832 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
833 pa_source_output_assert_ref(o);
834 pa_source_output_process_rewind(o, nbytes);
835 }
836 }
837
838 /* Called from IO thread context */
839 void pa_source_post(pa_source*s, const pa_memchunk *chunk) {
840 pa_source_output *o;
841 void *state = NULL;
842
843 pa_source_assert_ref(s);
844 pa_source_assert_io_context(s);
845 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
846 pa_assert(chunk);
847
848 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
849 return;
850
851 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
852 pa_memchunk vchunk = *chunk;
853
854 pa_memblock_ref(vchunk.memblock);
855 pa_memchunk_make_writable(&vchunk, 0);
856
857 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
858 pa_silence_memchunk(&vchunk, &s->sample_spec);
859 else
860 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
861
862 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
863 pa_source_output_assert_ref(o);
864
865 if (!o->thread_info.direct_on_input)
866 pa_source_output_push(o, &vchunk);
867 }
868
869 pa_memblock_unref(vchunk.memblock);
870 } else {
871
872 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
873 pa_source_output_assert_ref(o);
874
875 if (!o->thread_info.direct_on_input)
876 pa_source_output_push(o, chunk);
877 }
878 }
879 }
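/*
 * Illustrative sketch (not from this file): how a capture thread would feed
 * data into pa_source_post(). read_from_device() and u->block_size are
 * hypothetical; real drivers additionally track latency and handle rewinds.
 */
#if 0
    pa_memchunk chunk;
    void *p;

    chunk.memblock = pa_memblock_new(s->core->mempool, u->block_size);
    chunk.index = 0;

    p = pa_memblock_acquire(chunk.memblock);
    chunk.length = read_from_device(u, p, u->block_size);
    pa_memblock_release(chunk.memblock);

    if (chunk.length > 0)
        pa_source_post(s, &chunk);   /* soft volume/mute is applied in pa_source_post() */

    pa_memblock_unref(chunk.memblock);
#endif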
880
881 /* Called from IO thread context */
882 void pa_source_post_direct(pa_source*s, pa_source_output *o, const pa_memchunk *chunk) {
883 pa_source_assert_ref(s);
884 pa_source_assert_io_context(s);
885 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
886 pa_source_output_assert_ref(o);
887 pa_assert(o->thread_info.direct_on_input);
888 pa_assert(chunk);
889
890 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
891 return;
892
893 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
894 pa_memchunk vchunk = *chunk;
895
896 pa_memblock_ref(vchunk.memblock);
897 pa_memchunk_make_writable(&vchunk, 0);
898
899 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
900 pa_silence_memchunk(&vchunk, &s->sample_spec);
901 else
902 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
903
904 pa_source_output_push(o, &vchunk);
905
906 pa_memblock_unref(vchunk.memblock);
907 } else
908 pa_source_output_push(o, chunk);
909 }
910
911 /* Called from main thread */
912 pa_usec_t pa_source_get_latency(pa_source *s) {
913 pa_usec_t usec;
914
915 pa_source_assert_ref(s);
916 pa_assert_ctl_context();
917 pa_assert(PA_SOURCE_IS_LINKED(s->state));
918
919 if (s->state == PA_SOURCE_SUSPENDED)
920 return 0;
921
922 if (!(s->flags & PA_SOURCE_LATENCY))
923 return 0;
924
925 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
926
927 return usec;
928 }
929
930 /* Called from IO thread */
931 pa_usec_t pa_source_get_latency_within_thread(pa_source *s) {
932 pa_usec_t usec = 0;
933 pa_msgobject *o;
934
935 pa_source_assert_ref(s);
936 pa_source_assert_io_context(s);
937 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
938
939 /* The returned value is supposed to be in the time domain of the sound card! */
940
941 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
942 return 0;
943
944 if (!(s->flags & PA_SOURCE_LATENCY))
945 return 0;
946
947 o = PA_MSGOBJECT(s);
948
949 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
950
951 if (o->process_msg(o, PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
952 return -1;
953
954 return usec;
955 }
956
957 /* Called from the main thread (and also from the IO thread while the main
958 * thread is waiting).
959 *
960 * When a source uses volume sharing, it never has the PA_SOURCE_FLAT_VOLUME flag
961 * set. Instead, flat volume mode is detected by checking whether the root source
962 * has the flag set. */
963 pa_bool_t pa_source_flat_volume_enabled(pa_source *s) {
964 pa_source_assert_ref(s);
965
966 while (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
967 s = s->output_from_master->source;
968
969 return (s->flags & PA_SOURCE_FLAT_VOLUME);
970 }
971
972 /* Called from main context */
973 pa_bool_t pa_source_is_passthrough(pa_source *s) {
974
975 pa_source_assert_ref(s);
976
977 /* NB Currently only monitor sources support passthrough mode */
978 return (s->monitor_of && pa_sink_is_passthrough(s->monitor_of));
979 }
980
981 /* Called from main context */
982 void pa_source_enter_passthrough(pa_source *s) {
983 pa_cvolume volume;
984
985 /* set the volume to NORM */
986 s->saved_volume = *pa_source_get_volume(s, TRUE);
987 s->saved_save_volume = s->save_volume;
988
989 pa_cvolume_set(&volume, s->sample_spec.channels, PA_VOLUME_NORM);
990 pa_source_set_volume(s, &volume, TRUE, FALSE);
991 }
992
993 /* Called from main context */
994 void pa_source_leave_passthrough(pa_source *s) {
995 /* Restore source volume to what it was before we entered passthrough mode */
996 pa_source_set_volume(s, &s->saved_volume, TRUE, s->saved_save_volume);
997
998 pa_cvolume_init(&s->saved_volume);
999 s->saved_save_volume = FALSE;
1000 }
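/*
 * Sketch of the intended pairing (illustrative only): the monitored sink
 * drives enter/leave, and volume changes are refused while in passthrough
 * mode, as pa_source_set_volume() below also enforces.
 */
#if 0
    pa_source_enter_passthrough(s);   /* saves the current volume, forces PA_VOLUME_NORM */
    /* ... passthrough playback on the monitored sink ... */
    pa_source_leave_passthrough(s);   /* restores saved_volume and save_volume */

    if (!pa_source_is_passthrough(s))
        pa_source_set_volume(s, &new_volume, TRUE, TRUE);   /* new_volume: hypothetical */
#endif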
1001
1002 /* Called from main context. */
1003 static void compute_reference_ratio(pa_source_output *o) {
1004 unsigned c = 0;
1005 pa_cvolume remapped;
1006
1007 pa_assert(o);
1008 pa_assert(pa_source_flat_volume_enabled(o->source));
1009
1010 /*
1011 * Calculates the reference ratio from the source's reference
1012 * volume. This basically calculates:
1013 *
1014 * o->reference_ratio = o->volume / o->source->reference_volume
1015 */
1016
1017 remapped = o->source->reference_volume;
1018 pa_cvolume_remap(&remapped, &o->source->channel_map, &o->channel_map);
1019
1020 o->reference_ratio.channels = o->sample_spec.channels;
1021
1022 for (c = 0; c < o->sample_spec.channels; c++) {
1023
1024 /* We don't update when the source volume is 0 anyway */
1025 if (remapped.values[c] <= PA_VOLUME_MUTED)
1026 continue;
1027
1028 /* Don't update the reference ratio unless necessary */
1029 if (pa_sw_volume_multiply(
1030 o->reference_ratio.values[c],
1031 remapped.values[c]) == o->volume.values[c])
1032 continue;
1033
1034 o->reference_ratio.values[c] = pa_sw_volume_divide(
1035 o->volume.values[c],
1036 remapped.values[c]);
1037 }
1038 }
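/*
 * Worked example of the ratio arithmetic above, using raw pa_volume_t values
 * (PA_VOLUME_NORM == 0x10000; pa_sw_volume_multiply(a, b) is roughly
 * a*b/PA_VOLUME_NORM and pa_sw_volume_divide(a, b) roughly a*PA_VOLUME_NORM/b
 * in fixed point):
 *
 *   o->volume                 = PA_VOLUME_NORM / 2
 *   remapped reference volume = PA_VOLUME_NORM
 *
 *   o->reference_ratio = pa_sw_volume_divide(PA_VOLUME_NORM / 2, PA_VOLUME_NORM)
 *                      = PA_VOLUME_NORM / 2
 *
 * and the "don't update unless necessary" check holds afterwards:
 *   pa_sw_volume_multiply(o->reference_ratio, remapped) == o->volume
 */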
1039
1040 /* Called from main context. Only called for the root source in volume sharing
1041 * cases, except for internal recursive calls. */
1042 static void compute_reference_ratios(pa_source *s) {
1043 uint32_t idx;
1044 pa_source_output *o;
1045
1046 pa_source_assert_ref(s);
1047 pa_assert_ctl_context();
1048 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1049 pa_assert(pa_source_flat_volume_enabled(s));
1050
1051 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1052 compute_reference_ratio(o);
1053
1054 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1055 compute_reference_ratios(o->destination_source);
1056 }
1057 }
1058
1059 /* Called from main context. Only called for the root source in volume sharing
1060 * cases, except for internal recursive calls. */
1061 static void compute_real_ratios(pa_source *s) {
1062 pa_source_output *o;
1063 uint32_t idx;
1064
1065 pa_source_assert_ref(s);
1066 pa_assert_ctl_context();
1067 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1068 pa_assert(pa_source_flat_volume_enabled(s));
1069
1070 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1071 unsigned c;
1072 pa_cvolume remapped;
1073
1074 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1075 /* The origin source uses volume sharing, so this output's real ratio
1076 * is handled as a special case - the real ratio must be 0 dB, and
1077 * as a result o->soft_volume must equal o->volume_factor. */
1078 pa_cvolume_reset(&o->real_ratio, o->real_ratio.channels);
1079 o->soft_volume = o->volume_factor;
1080
1081 compute_real_ratios(o->destination_source);
1082
1083 continue;
1084 }
1085
1086 /*
1087 * This basically calculates:
1088 *
1089 * o->real_ratio := o->volume / s->real_volume
1090 * o->soft_volume := o->real_ratio * o->volume_factor
1091 */
1092
1093 remapped = s->real_volume;
1094 pa_cvolume_remap(&remapped, &s->channel_map, &o->channel_map);
1095
1096 o->real_ratio.channels = o->sample_spec.channels;
1097 o->soft_volume.channels = o->sample_spec.channels;
1098
1099 for (c = 0; c < o->sample_spec.channels; c++) {
1100
1101 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1102 /* We leave o->real_ratio untouched */
1103 o->soft_volume.values[c] = PA_VOLUME_MUTED;
1104 continue;
1105 }
1106
1107 /* Don't lose accuracy unless necessary */
1108 if (pa_sw_volume_multiply(
1109 o->real_ratio.values[c],
1110 remapped.values[c]) != o->volume.values[c])
1111
1112 o->real_ratio.values[c] = pa_sw_volume_divide(
1113 o->volume.values[c],
1114 remapped.values[c]);
1115
1116 o->soft_volume.values[c] = pa_sw_volume_multiply(
1117 o->real_ratio.values[c],
1118 o->volume_factor.values[c]);
1119 }
1120
1121 /* We don't copy the soft_volume to the thread_info data
1122 * here. That must be done by the caller */
1123 }
1124 }
1125
1126 static pa_cvolume *cvolume_remap_minimal_impact(
1127 pa_cvolume *v,
1128 const pa_cvolume *template,
1129 const pa_channel_map *from,
1130 const pa_channel_map *to) {
1131
1132 pa_cvolume t;
1133
1134 pa_assert(v);
1135 pa_assert(template);
1136 pa_assert(from);
1137 pa_assert(to);
1138 pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
1139 pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
1140
1141 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1142 * mapping from source output to source volumes:
1143 *
1144 * If template is a possible remapping from v it is used instead
1145 * of remapping anew.
1146 *
1147 * If the channel maps don't match we set an all-channel volume on
1148 * the source to ensure that changing a volume on one stream has no
1149 * effect that cannot be compensated for in another stream that
1150 * does not have the same channel map as the source. */
1151
1152 if (pa_channel_map_equal(from, to))
1153 return v;
1154
1155 t = *template;
1156 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1157 *v = *template;
1158 return v;
1159 }
1160
1161 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1162 return v;
1163 }
1164
1165 /* Called from main thread. Only called for the root source in volume sharing
1166 * cases, except for internal recursive calls. */
1167 static void get_maximum_output_volume(pa_source *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
1168 pa_source_output *o;
1169 uint32_t idx;
1170
1171 pa_source_assert_ref(s);
1172 pa_assert(max_volume);
1173 pa_assert(channel_map);
1174 pa_assert(pa_source_flat_volume_enabled(s));
1175
1176 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1177 pa_cvolume remapped;
1178
1179 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1180 get_maximum_output_volume(o->destination_source, max_volume, channel_map);
1181
1182 /* Ignore this output. The origin source uses volume sharing, so this
1183 * output's volume will be set to be equal to the root source's real
1184 * volume. Obviously this output's current volume must not then
1185 * affect what the root source's real volume will be. */
1186 continue;
1187 }
1188
1189 remapped = o->volume;
1190 cvolume_remap_minimal_impact(&remapped, max_volume, &o->channel_map, channel_map);
1191 pa_cvolume_merge(max_volume, max_volume, &remapped);
1192 }
1193 }
1194
1195 /* Called from main thread. Only called for the root source in volume sharing
1196 * cases, except for internal recursive calls. */
1197 static pa_bool_t has_outputs(pa_source *s) {
1198 pa_source_output *o;
1199 uint32_t idx;
1200
1201 pa_source_assert_ref(s);
1202
1203 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1204 if (!o->destination_source || !(o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || has_outputs(o->destination_source))
1205 return TRUE;
1206 }
1207
1208 return FALSE;
1209 }
1210
1211 /* Called from main thread. Only called for the root source in volume sharing
1212 * cases, except for internal recursive calls. */
1213 static void update_real_volume(pa_source *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
1214 pa_source_output *o;
1215 uint32_t idx;
1216
1217 pa_source_assert_ref(s);
1218 pa_assert(new_volume);
1219 pa_assert(channel_map);
1220
1221 s->real_volume = *new_volume;
1222 pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
1223
1224 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1225 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1226 if (pa_source_flat_volume_enabled(s)) {
1227 pa_cvolume old_volume = o->volume;
1228
1229 /* Follow the root source's real volume. */
1230 o->volume = *new_volume;
1231 pa_cvolume_remap(&o->volume, channel_map, &o->channel_map);
1232 compute_reference_ratio(o);
1233
1234 /* The volume changed, let's tell people so */
1235 if (!pa_cvolume_equal(&old_volume, &o->volume)) {
1236 if (o->volume_changed)
1237 o->volume_changed(o);
1238
1239 pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
1240 }
1241 }
1242
1243 update_real_volume(o->destination_source, new_volume, channel_map);
1244 }
1245 }
1246 }
1247
1248 /* Called from main thread. Only called for the root source in shared volume
1249 * cases. */
1250 static void compute_real_volume(pa_source *s) {
1251 pa_source_assert_ref(s);
1252 pa_assert_ctl_context();
1253 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1254 pa_assert(pa_source_flat_volume_enabled(s));
1255 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1256
1257 /* This determines the maximum volume of all streams and sets
1258 * s->real_volume accordingly. */
1259
1260 if (!has_outputs(s)) {
1261 /* In the special case that we have no source outputs we leave the
1262 * volume unmodified. */
1263 update_real_volume(s, &s->reference_volume, &s->channel_map);
1264 return;
1265 }
1266
1267 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1268
1269 /* First let's determine the new maximum volume of all outputs
1270 * connected to this source */
1271 get_maximum_output_volume(s, &s->real_volume, &s->channel_map);
1272 update_real_volume(s, &s->real_volume, &s->channel_map);
1273
1274 /* Then, let's update the real ratios/soft volumes of all outputs
1275 * connected to this source */
1276 compute_real_ratios(s);
1277 }
1278
1279 /* Called from main thread. Only called for the root source in shared volume
1280 * cases, except for internal recursive calls. */
1281 static void propagate_reference_volume(pa_source *s) {
1282 pa_source_output *o;
1283 uint32_t idx;
1284
1285 pa_source_assert_ref(s);
1286 pa_assert_ctl_context();
1287 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1288 pa_assert(pa_source_flat_volume_enabled(s));
1289
1290 /* This is called whenever the source volume changes that is not
1291 * caused by a source output volume change. We need to fix up the
1292 * source output volumes accordingly */
1293
1294 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1295 pa_cvolume old_volume;
1296
1297 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1298 propagate_reference_volume(o->destination_source);
1299
1300 /* Since the origin source uses volume sharing, this output's volume
1301 * needs to be updated to match the root source's real volume, but
1302 * that will be done later in update_shared_real_volume(). */
1303 continue;
1304 }
1305
1306 old_volume = o->volume;
1307
1308 /* This basically calculates:
1309 *
1310 * o->volume := o->reference_volume * o->reference_ratio */
1311
1312 o->volume = s->reference_volume;
1313 pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
1314 pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);
1315
1316 /* The volume changed, let's tell people so */
1317 if (!pa_cvolume_equal(&old_volume, &o->volume)) {
1318
1319 if (o->volume_changed)
1320 o->volume_changed(o);
1321
1322 pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
1323 }
1324 }
1325 }
1326
1327 /* Called from main thread. Only called for the root source in volume sharing
1328 * cases, except for internal recursive calls. The return value indicates
1329 * whether any reference volume actually changed. */
1330 static pa_bool_t update_reference_volume(pa_source *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
1331 pa_cvolume volume;
1332 pa_bool_t reference_volume_changed;
1333 pa_source_output *o;
1334 uint32_t idx;
1335
1336 pa_source_assert_ref(s);
1337 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1338 pa_assert(v);
1339 pa_assert(channel_map);
1340 pa_assert(pa_cvolume_valid(v));
1341
1342 volume = *v;
1343 pa_cvolume_remap(&volume, channel_map, &s->channel_map);
1344
1345 reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
1346 s->reference_volume = volume;
1347
1348 s->save_volume = (!reference_volume_changed && s->save_volume) || save;
1349
1350 if (reference_volume_changed)
1351 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1352 else if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1353 /* If the root source's volume doesn't change, then there can't be any
1354 * changes in the other sources in the source tree either.
1355 *
1356 * It's probably theoretically possible that even if the root source's
1357 * volume changes slightly, some filter source doesn't change its volume
1358 * due to rounding errors. If that happens, we still want to propagate
1359 * the changed root source volume to the sources connected to the
1360 * intermediate source that didn't change its volume. This theoretical
1361 * possibility is the reason why we have that !(s->flags &
1362 * PA_SOURCE_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
1363 * notice even if we always returned FALSE here whenever
1364 * reference_volume_changed is FALSE. */
1365 return FALSE;
1366
1367 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1368 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1369 update_reference_volume(o->destination_source, v, channel_map, FALSE);
1370 }
1371
1372 return TRUE;
1373 }
1374
1375 /* Called from main thread */
1376 void pa_source_set_volume(
1377 pa_source *s,
1378 const pa_cvolume *volume,
1379 pa_bool_t send_msg,
1380 pa_bool_t save) {
1381
1382 pa_cvolume new_reference_volume;
1383 pa_source *root_source = s;
1384
1385 pa_source_assert_ref(s);
1386 pa_assert_ctl_context();
1387 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1388 pa_assert(!volume || pa_cvolume_valid(volume));
1389 pa_assert(volume || pa_source_flat_volume_enabled(s));
1390 pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
1391
1392 /* make sure we don't change the volume in PASSTHROUGH mode ...
1393 * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
1394 if (pa_source_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
1395 pa_log_warn("Cannot change volume, Source is monitor of a PASSTHROUGH sink");
1396 return;
1397 }
1398
1399 /* In case of volume sharing, the volume is set for the root source first,
1400 * from which it's then propagated to the sharing sources. */
1401 while (root_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
1402 root_source = root_source->output_from_master->source;
1403
1404 /* As a special exception we accept mono volumes on all sources --
1405 * even on those with more complex channel maps */
1406
1407 if (volume) {
1408 if (pa_cvolume_compatible(volume, &s->sample_spec))
1409 new_reference_volume = *volume;
1410 else {
1411 new_reference_volume = s->reference_volume;
1412 pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
1413 }
1414
1415 pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
1416 }
1417
1418 /* If volume is NULL we synchronize the source's real and reference
1419 * volumes with the stream volumes. If it is not NULL we update
1420 * the reference_volume with it. */
1421
1422 if (volume) {
1423 if (update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save)) {
1424 if (pa_source_flat_volume_enabled(root_source)) {
1425 /* OK, propagate this volume change back to the outputs */
1426 propagate_reference_volume(root_source);
1427
1428 /* And now recalculate the real volume */
1429 compute_real_volume(root_source);
1430 } else
1431 update_real_volume(root_source, &root_source->reference_volume, &root_source->channel_map);
1432 }
1433
1434 } else {
1435 pa_assert(pa_source_flat_volume_enabled(root_source));
1436
1437 /* Ok, let's determine the new real volume */
1438 compute_real_volume(root_source);
1439
1440 /* Let's 'push' the reference volume if necessary */
1441 pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_source->real_volume);
1442 update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save);
1443
1444 /* Now that the reference volume is updated, we can update the streams'
1445 * reference ratios. */
1446 compute_reference_ratios(root_source);
1447 }
1448
1449 if (root_source->set_volume) {
1450 /* If we have a function set_volume(), then we do not apply a
1451 * soft volume by default. However, set_volume() is free to
1452 * apply one to root_source->soft_volume */
1453
1454 pa_cvolume_reset(&root_source->soft_volume, root_source->sample_spec.channels);
1455 if (!(root_source->flags & PA_SOURCE_SYNC_VOLUME))
1456 root_source->set_volume(root_source);
1457
1458 } else
1459 /* If we have no function set_volume(), then the soft volume
1460 * becomes the real volume */
1461 root_source->soft_volume = root_source->real_volume;
1462
1463 /* This tells the source that soft volume and/or real volume changed */
1464 if (send_msg)
1465 pa_assert_se(pa_asyncmsgq_send(root_source->asyncmsgq, PA_MSGOBJECT(root_source), PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
1466 }
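/*
 * Caller-side sketch (illustrative only): setting the same volume on all
 * channels of a source from the main thread.
 */
#if 0
    pa_cvolume v;

    pa_cvolume_set(&v, s->sample_spec.channels, (pa_volume_t) (PA_VOLUME_NORM * 3 / 4));
    pa_source_set_volume(s, &v, TRUE, TRUE);   /* send_msg=TRUE, save=TRUE */
#endif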
1467
1468 /* Called from the io thread if sync volume is used, otherwise from the main thread.
1469 * Only to be called by source implementor */
1470 void pa_source_set_soft_volume(pa_source *s, const pa_cvolume *volume) {
1471
1472 pa_source_assert_ref(s);
1473 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1474
1475 if (s->flags & PA_SOURCE_SYNC_VOLUME)
1476 pa_source_assert_io_context(s);
1477 else
1478 pa_assert_ctl_context();
1479
1480 if (!volume)
1481 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1482 else
1483 s->soft_volume = *volume;
1484
1485 if (PA_SOURCE_IS_LINKED(s->state) && !(s->flags & PA_SOURCE_SYNC_VOLUME))
1486 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1487 else
1488 s->thread_info.soft_volume = s->soft_volume;
1489 }
1490
1491 /* Called from the main thread. Only called for the root source in volume sharing
1492 * cases, except for internal recursive calls. */
1493 static void propagate_real_volume(pa_source *s, const pa_cvolume *old_real_volume) {
1494 pa_source_output *o;
1495 uint32_t idx;
1496
1497 pa_source_assert_ref(s);
1498 pa_assert(old_real_volume);
1499 pa_assert_ctl_context();
1500 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1501
1502 /* This is called when the hardware's real volume changes due to
1503 * some external event. We copy the real volume into our
1504 * reference volume and then rebuild the stream volumes based on
1505 * o->real_ratio which should stay fixed. */
1506
1507 if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1508 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1509 return;
1510
1511 /* 1. Make the real volume the reference volume */
1512 update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
1513 }
1514
1515 if (pa_source_flat_volume_enabled(s)) {
1516
1517 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1518 pa_cvolume old_volume = o->volume;
1519
1520 /* 2. Since the source's reference and real volumes are equal
1521 * now our ratios should be too. */
1522 o->reference_ratio = o->real_ratio;
1523
1524 /* 3. Recalculate the new stream reference volume based on the
1525 * reference ratio and the source's reference volume.
1526 *
1527 * This basically calculates:
1528 *
1529 * o->volume = s->reference_volume * o->reference_ratio
1530 *
1531 * This is identical to propagate_reference_volume() */
1532 o->volume = s->reference_volume;
1533 pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
1534 pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);
1535
1536 /* Notify if something changed */
1537 if (!pa_cvolume_equal(&old_volume, &o->volume)) {
1538
1539 if (o->volume_changed)
1540 o->volume_changed(o);
1541
1542 pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
1543 }
1544
1545 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1546 propagate_real_volume(o->destination_source, old_real_volume);
1547 }
1548 }
1549
1550 /* Something got changed in the hardware. It probably makes sense
1551 * to save changed hw settings given that hw volume changes not
1552 * triggered by PA are almost certainly done by the user. */
1553 if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1554 s->save_volume = TRUE;
1555 }
1556
1557 /* Called from io thread */
1558 void pa_source_update_volume_and_mute(pa_source *s) {
1559 pa_assert(s);
1560 pa_source_assert_io_context(s);
1561
1562 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
1563 }
1564
1565 /* Called from main thread */
1566 const pa_cvolume *pa_source_get_volume(pa_source *s, pa_bool_t force_refresh) {
1567 pa_source_assert_ref(s);
1568 pa_assert_ctl_context();
1569 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1570
1571 if (s->refresh_volume || force_refresh) {
1572 struct pa_cvolume old_real_volume;
1573
1574 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1575
1576 old_real_volume = s->real_volume;
1577
1578 if (!(s->flags & PA_SOURCE_SYNC_VOLUME) && s->get_volume)
1579 s->get_volume(s);
1580
1581 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1582
1583 update_real_volume(s, &s->real_volume, &s->channel_map);
1584 propagate_real_volume(s, &old_real_volume);
1585 }
1586
1587 return &s->reference_volume;
1588 }
1589
1590 /* Called from main thread. In volume sharing cases, only the root source may
1591 * call this. */
1592 void pa_source_volume_changed(pa_source *s, const pa_cvolume *new_real_volume) {
1593 pa_cvolume old_real_volume;
1594
1595 pa_source_assert_ref(s);
1596 pa_assert_ctl_context();
1597 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1598 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1599
1600 /* The source implementor may call this if the volume changed to make sure everyone is notified */
1601
1602 old_real_volume = s->real_volume;
1603 update_real_volume(s, new_real_volume, &s->channel_map);
1604 propagate_real_volume(s, &old_real_volume);
1605 }
1606
1607 /* Called from main thread */
1608 void pa_source_set_mute(pa_source *s, pa_bool_t mute, pa_bool_t save) {
1609 pa_bool_t old_muted;
1610
1611 pa_source_assert_ref(s);
1612 pa_assert_ctl_context();
1613 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1614
1615 old_muted = s->muted;
1616 s->muted = mute;
1617 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1618
1619 if (!(s->flags & PA_SOURCE_SYNC_VOLUME) && s->set_mute)
1620 s->set_mute(s);
1621
1622 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1623
1624 if (old_muted != s->muted)
1625 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1626 }
1627
1628 /* Called from main thread */
1629 pa_bool_t pa_source_get_mute(pa_source *s, pa_bool_t force_refresh) {
1630
1631 pa_source_assert_ref(s);
1632 pa_assert_ctl_context();
1633 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1634
1635 if (s->refresh_muted || force_refresh) {
1636 pa_bool_t old_muted = s->muted;
1637
1638 if (!(s->flags & PA_SOURCE_SYNC_VOLUME) && s->get_mute)
1639 s->get_mute(s);
1640
1641 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1642
1643 if (old_muted != s->muted) {
1644 s->save_muted = TRUE;
1645
1646 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1647
1648 /* Make sure the soft mute status stays in sync */
1649 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1650 }
1651 }
1652
1653 return s->muted;
1654 }
1655
1656 /* Called from main thread */
1657 void pa_source_mute_changed(pa_source *s, pa_bool_t new_muted) {
1658 pa_source_assert_ref(s);
1659 pa_assert_ctl_context();
1660 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1661
1662 /* The source implementor may call this if the mute state changed to make sure everyone is notified */
1663
1664 if (s->muted == new_muted)
1665 return;
1666
1667 s->muted = new_muted;
1668 s->save_muted = TRUE;
1669
1670 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1671 }
1672
1673 /* Called from main thread */
1674 pa_bool_t pa_source_update_proplist(pa_source *s, pa_update_mode_t mode, pa_proplist *p) {
1675 pa_source_assert_ref(s);
1676 pa_assert_ctl_context();
1677
1678 if (p)
1679 pa_proplist_update(s->proplist, mode, p);
1680
1681 if (PA_SOURCE_IS_LINKED(s->state)) {
1682 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1683 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1684 }
1685
1686 return TRUE;
1687 }
1688
1689 /* Called from main thread */
1690 /* FIXME -- this should be dropped and be merged into pa_source_update_proplist() */
1691 void pa_source_set_description(pa_source *s, const char *description) {
1692 const char *old;
1693 pa_source_assert_ref(s);
1694 pa_assert_ctl_context();
1695
1696 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1697 return;
1698
1699 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1700
1701 if (old && description && pa_streq(old, description))
1702 return;
1703
1704 if (description)
1705 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1706 else
1707 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1708
1709 if (PA_SOURCE_IS_LINKED(s->state)) {
1710 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1711 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1712 }
1713 }
1714
1715 /* Called from main thread */
1716 unsigned pa_source_linked_by(pa_source *s) {
1717 pa_source_assert_ref(s);
1718 pa_assert_ctl_context();
1719 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1720
1721 return pa_idxset_size(s->outputs);
1722 }
1723
1724 /* Called from main thread */
1725 unsigned pa_source_used_by(pa_source *s) {
1726 unsigned ret;
1727
1728 pa_source_assert_ref(s);
1729 pa_assert_ctl_context();
1730 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1731
1732 ret = pa_idxset_size(s->outputs);
1733 pa_assert(ret >= s->n_corked);
1734
1735 return ret - s->n_corked;
1736 }
1737
1738 /* Called from main thread */
1739 unsigned pa_source_check_suspend(pa_source *s) {
1740 unsigned ret;
1741 pa_source_output *o;
1742 uint32_t idx;
1743
1744 pa_source_assert_ref(s);
1745 pa_assert_ctl_context();
1746
1747 if (!PA_SOURCE_IS_LINKED(s->state))
1748 return 0;
1749
1750 ret = 0;
1751
1752 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1753 pa_source_output_state_t st;
1754
1755 st = pa_source_output_get_state(o);
1756
1757 /* We do not assert here. It is perfectly valid for a source output to
1758 * be in the INIT state (i.e. created, marked done but not yet put)
1759 * and we should not care if it's unlinked as it won't contribute
1760 * towards our busy status.
1761 */
1762 if (!PA_SOURCE_OUTPUT_IS_LINKED(st))
1763 continue;
1764
1765 if (st == PA_SOURCE_OUTPUT_CORKED)
1766 continue;
1767
1768 if (o->flags & PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND)
1769 continue;
1770
1771 ret ++;
1772 }
1773
1774 return ret;
1775 }
1776
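/* Copies each output's main-thread soft volume into its thread_info copy so
 * that the IO thread picks up the new value. No rewind is requested here
 * (note the commented-out call below). */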
1777 /* Called from the IO thread */
1778 static void sync_output_volumes_within_thread(pa_source *s) {
1779 pa_source_output *o;
1780 void *state = NULL;
1781
1782 pa_source_assert_ref(s);
1783 pa_source_assert_io_context(s);
1784
1785 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
1786 if (pa_cvolume_equal(&o->thread_info.soft_volume, &o->soft_volume))
1787 continue;
1788
1789 o->thread_info.soft_volume = o->soft_volume;
1790 //pa_source_output_request_rewind(o, 0, TRUE, FALSE, FALSE);
1791 }
1792 }
1793
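/* Synchronously runs the SET_VOLUME_SYNCED handler on this source and then
 * recurses into every filter source that shares its volume with this master,
 * so the whole volume-sharing tree ends up with consistent volumes. */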
1794 /* Called from the IO thread. Only called for the root source in volume sharing
1795 * cases, except for internal recursive calls. */
1796 static void set_shared_volume_within_thread(pa_source *s) {
1797 pa_source_output *o;
1798 void *state = NULL;
1799
1800 pa_source_assert_ref(s);
1801
1802 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
1803
1804 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
1805 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1806 set_shared_volume_within_thread(o->destination_source);
1807 }
1808 }
1809
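/* A minimal sketch (illustrative only, not part of this file) of how a
 * source implementation typically chains to this default handler for any
 * message codes it does not handle itself:
 *
 *     static int my_source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
 *         struct userdata *u = PA_SOURCE(o)->userdata;
 *
 *         switch (code) {
 *             case PA_SOURCE_MESSAGE_GET_LATENCY:
 *                 *((pa_usec_t*) data) = my_driver_get_latency(u);   // my_driver_get_latency() is hypothetical
 *                 return 0;
 *         }
 *
 *         return pa_source_process_msg(o, code, data, offset, chunk);
 *     }
 */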
1810 /* Called from the IO thread, except for a few messages that are handled from the main thread instead */
1811 int pa_source_process_msg(pa_msgobject *object, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1812 pa_source *s = PA_SOURCE(object);
1813 pa_source_assert_ref(s);
1814
1815 switch ((pa_source_message_t) code) {
1816
1817 case PA_SOURCE_MESSAGE_ADD_OUTPUT: {
1818 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
1819
1820 pa_hashmap_put(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index), pa_source_output_ref(o));
1821
1822 if (o->direct_on_input) {
1823 o->thread_info.direct_on_input = o->direct_on_input;
1824 pa_hashmap_put(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index), o);
1825 }
1826
1827 pa_assert(!o->thread_info.attached);
1828 o->thread_info.attached = TRUE;
1829
1830 if (o->attach)
1831 o->attach(o);
1832
1833 pa_source_output_set_state_within_thread(o, o->state);
1834
1835 if (o->thread_info.requested_source_latency != (pa_usec_t) -1)
1836 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
1837
1838 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
1839
1840 /* We don't just invalidate the requested latency here,
1841 * because if we are in a move we might need to fix up the
1842 * requested latency. */
1843 pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);
1844
1845 /* In flat volume mode we need to update the volume as
1846 * well */
1847 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
1848 }
1849
1850 case PA_SOURCE_MESSAGE_REMOVE_OUTPUT: {
1851 pa_source_output *o = PA_SOURCE_OUTPUT(userdata);
1852
1853 pa_source_output_set_state_within_thread(o, o->state);
1854
1855 if (o->detach)
1856 o->detach(o);
1857
1858 pa_assert(o->thread_info.attached);
1859 o->thread_info.attached = FALSE;
1860
1861 if (o->thread_info.direct_on_input) {
1862 pa_hashmap_remove(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index));
1863 o->thread_info.direct_on_input = NULL;
1864 }
1865
1866 if (pa_hashmap_remove(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index)))
1867 pa_source_output_unref(o);
1868
1869 pa_source_invalidate_requested_latency(s, TRUE);
1870
1871 /* In flat volume mode we need to update the volume as
1872 * well */
1873 return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
1874 }
1875
1876 case PA_SOURCE_MESSAGE_SET_SHARED_VOLUME: {
1877 pa_source *root_source = s;
1878
1879 while (root_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)
1880 root_source = root_source->output_from_master->source;
1881
1882 set_shared_volume_within_thread(root_source);
1883 return 0;
1884 }
1885
1886 case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:
1887
1888 if (s->flags & PA_SOURCE_SYNC_VOLUME) {
1889 s->set_volume(s);
1890 pa_source_volume_change_push(s);
1891 }
1892 /* Fall through ... */
1893
1894 case PA_SOURCE_MESSAGE_SET_VOLUME:
1895
1896 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1897 s->thread_info.soft_volume = s->soft_volume;
1898 }
1899
1900 /* Fall through ... */
1901
1902 case PA_SOURCE_MESSAGE_SYNC_VOLUMES:
1903 sync_output_volumes_within_thread(s);
1904 return 0;
1905
1906 case PA_SOURCE_MESSAGE_GET_VOLUME:
1907
1908 if ((s->flags & PA_SOURCE_SYNC_VOLUME) && s->get_volume) {
1909 s->get_volume(s);
1910 pa_source_volume_change_flush(s);
1911 pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
1912 }
1913
1914 /* In case the source implementor reset the SW volume. */
1915 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1916 s->thread_info.soft_volume = s->soft_volume;
1917 }
1918
1919 return 0;
1920
1921 case PA_SOURCE_MESSAGE_SET_MUTE:
1922
1923 if (s->thread_info.soft_muted != s->muted) {
1924 s->thread_info.soft_muted = s->muted;
1925 }
1926
1927 if (s->flags & PA_SOURCE_SYNC_VOLUME && s->set_mute)
1928 s->set_mute(s);
1929
1930 return 0;
1931
1932 case PA_SOURCE_MESSAGE_GET_MUTE:
1933
1934 if (s->flags & PA_SOURCE_SYNC_VOLUME && s->get_mute)
1935 s->get_mute(s);
1936
1937 return 0;
1938
1939 case PA_SOURCE_MESSAGE_SET_STATE: {
1940
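/* Detect a transition into or out of SUSPENDED so that every output gets a
 * chance to suspend or resume its own processing. */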
1941 pa_bool_t suspend_change =
1942 (s->thread_info.state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1943 (PA_SOURCE_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SOURCE_SUSPENDED);
1944
1945 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1946
1947 if (suspend_change) {
1948 pa_source_output *o;
1949 void *state = NULL;
1950
1951 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
1952 if (o->suspend_within_thread)
1953 o->suspend_within_thread(o, s->thread_info.state == PA_SOURCE_SUSPENDED);
1954 }
1955
1956 return 0;
1957 }
1958
1959 case PA_SOURCE_MESSAGE_DETACH:
1960
1961 /* Detach all streams */
1962 pa_source_detach_within_thread(s);
1963 return 0;
1964
1965 case PA_SOURCE_MESSAGE_ATTACH:
1966
1967 /* Reattach all streams */
1968 pa_source_attach_within_thread(s);
1969 return 0;
1970
1971 case PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY: {
1972
1973 pa_usec_t *usec = userdata;
1974 *usec = pa_source_get_requested_latency_within_thread(s);
1975
1976 /* Yes, that's right: the IO thread will see -1 when no
1977 * explicit requested latency is configured, while the main
1978 * thread will see max_latency */
1979 if (*usec == (pa_usec_t) -1)
1980 *usec = s->thread_info.max_latency;
1981
1982 return 0;
1983 }
1984
1985 case PA_SOURCE_MESSAGE_SET_LATENCY_RANGE: {
1986 pa_usec_t *r = userdata;
1987
1988 pa_source_set_latency_range_within_thread(s, r[0], r[1]);
1989
1990 return 0;
1991 }
1992
1993 case PA_SOURCE_MESSAGE_GET_LATENCY_RANGE: {
1994 pa_usec_t *r = userdata;
1995
1996 r[0] = s->thread_info.min_latency;
1997 r[1] = s->thread_info.max_latency;
1998
1999 return 0;
2000 }
2001
2002 case PA_SOURCE_MESSAGE_GET_FIXED_LATENCY:
2003
2004 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2005 return 0;
2006
2007 case PA_SOURCE_MESSAGE_SET_FIXED_LATENCY:
2008
2009 pa_source_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2010 return 0;
2011
2012 case PA_SOURCE_MESSAGE_GET_MAX_REWIND:
2013
2014 *((size_t*) userdata) = s->thread_info.max_rewind;
2015 return 0;
2016
2017 case PA_SOURCE_MESSAGE_SET_MAX_REWIND:
2018
2019 pa_source_set_max_rewind_within_thread(s, (size_t) offset);
2020 return 0;
2021
2022 case PA_SOURCE_MESSAGE_GET_LATENCY:
2023
2024 if (s->monitor_of) {
2025 *((pa_usec_t*) userdata) = 0;
2026 return 0;
2027 }
2028
2029 /* Implementors need to override this implementation! */
2030 return -1;
2031
2032 case PA_SOURCE_MESSAGE_SET_PORT:
2033
2034 pa_assert(userdata);
2035 if (s->set_port) {
2036 struct source_message_set_port *msg_data = userdata;
2037 msg_data->ret = s->set_port(s, msg_data->port);
2038 }
2039 return 0;
2040
2041 case PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE:
2042 /* This message is sent from the IO thread and handled in the main thread. */
2043 pa_assert_ctl_context();
2044
2045 pa_source_get_volume(s, TRUE);
2046 pa_source_get_mute(s, TRUE);
2047 return 0;
2048
2049 case PA_SOURCE_MESSAGE_MAX:
2050 ;
2051 }
2052
2053 return -1;
2054 }
2055
2056 /* Called from main thread */
2057 int pa_source_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2058 pa_source *source;
2059 uint32_t idx;
2060 int ret = 0;
2061
2062 pa_core_assert_ref(c);
2063 pa_assert_ctl_context();
2064 pa_assert(cause != 0);
2065
2066 for (source = PA_SOURCE(pa_idxset_first(c->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(c->sources, &idx))) {
2067 int r;
2068
2069 if (source->monitor_of)
2070 continue;
2071
2072 if ((r = pa_source_suspend(source, suspend, cause)) < 0)
2073 ret = r;
2074 }
2075
2076 return ret;
2077 }
2078
2079 /* Called from main thread */
2080 void pa_source_detach(pa_source *s) {
2081 pa_source_assert_ref(s);
2082 pa_assert_ctl_context();
2083 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2084
2085 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2086 }
2087
2088 /* Called from main thread */
2089 void pa_source_attach(pa_source *s) {
2090 pa_source_assert_ref(s);
2091 pa_assert_ctl_context();
2092 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2093
2094 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2095 }
2096
2097 /* Called from IO thread */
2098 void pa_source_detach_within_thread(pa_source *s) {
2099 pa_source_output *o;
2100 void *state = NULL;
2101
2102 pa_source_assert_ref(s);
2103 pa_source_assert_io_context(s);
2104 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2105
2106 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2107 if (o->detach)
2108 o->detach(o);
2109 }
2110
2111 /* Called from IO thread */
2112 void pa_source_attach_within_thread(pa_source *s) {
2113 pa_source_output *o;
2114 void *state = NULL;
2115
2116 pa_source_assert_ref(s);
2117 pa_source_assert_io_context(s);
2118 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2119
2120 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2121 if (o->attach)
2122 o->attach(o);
2123 }
2124
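/* Computes the effective requested latency: without dynamic latency support
 * the clamped fixed latency is returned; otherwise the smallest latency
 * requested by any output is picked, clamped to the configured range, and
 * cached while the source is linked. Returns (pa_usec_t) -1 if no output
 * requested anything. */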
2125 /* Called from IO thread */
2126 pa_usec_t pa_source_get_requested_latency_within_thread(pa_source *s) {
2127 pa_usec_t result = (pa_usec_t) -1;
2128 pa_source_output *o;
2129 void *state = NULL;
2130
2131 pa_source_assert_ref(s);
2132 pa_source_assert_io_context(s);
2133
2134 if (!(s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2135 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2136
2137 if (s->thread_info.requested_latency_valid)
2138 return s->thread_info.requested_latency;
2139
2140 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2141 if (o->thread_info.requested_source_latency != (pa_usec_t) -1 &&
2142 (result == (pa_usec_t) -1 || result > o->thread_info.requested_source_latency))
2143 result = o->thread_info.requested_source_latency;
2144
2145 if (result != (pa_usec_t) -1)
2146 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2147
2148 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2149 /* Only cache this if we are fully set up */
2150 s->thread_info.requested_latency = result;
2151 s->thread_info.requested_latency_valid = TRUE;
2152 }
2153
2154 return result;
2155 }
2156
2157 /* Called from main thread */
2158 pa_usec_t pa_source_get_requested_latency(pa_source *s) {
2159 pa_usec_t usec = 0;
2160
2161 pa_source_assert_ref(s);
2162 pa_assert_ctl_context();
2163 pa_assert(PA_SOURCE_IS_LINKED(s->state));
2164
2165 if (s->state == PA_SOURCE_SUSPENDED)
2166 return 0;
2167
2168 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2169
2170 return usec;
2171 }
2172
2173 /* Called from IO thread */
2174 void pa_source_set_max_rewind_within_thread(pa_source *s, size_t max_rewind) {
2175 pa_source_output *o;
2176 void *state = NULL;
2177
2178 pa_source_assert_ref(s);
2179 pa_source_assert_io_context(s);
2180
2181 if (max_rewind == s->thread_info.max_rewind)
2182 return;
2183
2184 s->thread_info.max_rewind = max_rewind;
2185
2186 if (PA_SOURCE_IS_LINKED(s->thread_info.state))
2187 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2188 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2189 }
2190
2191 /* Called from main thread */
2192 void pa_source_set_max_rewind(pa_source *s, size_t max_rewind) {
2193 pa_source_assert_ref(s);
2194 pa_assert_ctl_context();
2195
2196 if (PA_SOURCE_IS_LINKED(s->state))
2197 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2198 else
2199 pa_source_set_max_rewind_within_thread(s, max_rewind);
2200 }
2201
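/* Drops the cached requested latency (when dynamic latency is supported) so
 * it gets recomputed on the next query; while linked, also notifies the
 * implementor and all outputs, and forwards the invalidation to the sink
 * this source is a monitor of, if any. */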
2202 /* Called from IO thread */
2203 void pa_source_invalidate_requested_latency(pa_source *s, pa_bool_t dynamic) {
2204 pa_source_output *o;
2205 void *state = NULL;
2206
2207 pa_source_assert_ref(s);
2208 pa_source_assert_io_context(s);
2209
2210 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2211 s->thread_info.requested_latency_valid = FALSE;
2212 else if (dynamic)
2213 return;
2214
2215 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2216
2217 if (s->update_requested_latency)
2218 s->update_requested_latency(s);
2219
2220 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2221 if (o->update_source_requested_latency)
2222 o->update_source_requested_latency(o);
2223 }
2224
2225 if (s->monitor_of)
2226 pa_sink_invalidate_requested_latency(s->monitor_of, dynamic);
2227 }
2228
2229 /* Called from main thread */
2230 void pa_source_set_latency_range(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2231 pa_source_assert_ref(s);
2232 pa_assert_ctl_context();
2233
2234 /* min_latency == 0: no limit
2235 * min_latency anything else: specified limit
2236 *
2237 * Similar for max_latency */
2238
2239 if (min_latency < ABSOLUTE_MIN_LATENCY)
2240 min_latency = ABSOLUTE_MIN_LATENCY;
2241
2242 if (max_latency <= 0 ||
2243 max_latency > ABSOLUTE_MAX_LATENCY)
2244 max_latency = ABSOLUTE_MAX_LATENCY;
2245
2246 pa_assert(min_latency <= max_latency);
2247
2248 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2249 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2250 max_latency == ABSOLUTE_MAX_LATENCY) ||
2251 (s->flags & PA_SOURCE_DYNAMIC_LATENCY));
2252
2253 if (PA_SOURCE_IS_LINKED(s->state)) {
2254 pa_usec_t r[2];
2255
2256 r[0] = min_latency;
2257 r[1] = max_latency;
2258
2259 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2260 } else
2261 pa_source_set_latency_range_within_thread(s, min_latency, max_latency);
2262 }
2263
2264 /* Called from main thread */
2265 void pa_source_get_latency_range(pa_source *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2266 pa_source_assert_ref(s);
2267 pa_assert_ctl_context();
2268 pa_assert(min_latency);
2269 pa_assert(max_latency);
2270
2271 if (PA_SOURCE_IS_LINKED(s->state)) {
2272 pa_usec_t r[2] = { 0, 0 };
2273
2274 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2275
2276 *min_latency = r[0];
2277 *max_latency = r[1];
2278 } else {
2279 *min_latency = s->thread_info.min_latency;
2280 *max_latency = s->thread_info.max_latency;
2281 }
2282 }
2283
2284 /* Called from IO thread, and from main thread before pa_source_put() is called */
2285 void pa_source_set_latency_range_within_thread(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2286 pa_source_assert_ref(s);
2287 pa_source_assert_io_context(s);
2288
2289 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2290 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2291 pa_assert(min_latency <= max_latency);
2292
2293 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2294 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2295 max_latency == ABSOLUTE_MAX_LATENCY) ||
2296 (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ||
2297 s->monitor_of);
2298
2299 if (s->thread_info.min_latency == min_latency &&
2300 s->thread_info.max_latency == max_latency)
2301 return;
2302
2303 s->thread_info.min_latency = min_latency;
2304 s->thread_info.max_latency = max_latency;
2305
2306 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2307 pa_source_output *o;
2308 void *state = NULL;
2309
2310 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2311 if (o->update_source_latency_range)
2312 o->update_source_latency_range(o);
2313 }
2314
2315 pa_source_invalidate_requested_latency(s, FALSE);
2316 }
2317
2318 /* Called from main thread, before the source is put */
2319 void pa_source_set_fixed_latency(pa_source *s, pa_usec_t latency) {
2320 pa_source_assert_ref(s);
2321 pa_assert_ctl_context();
2322
2323 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2324 pa_assert(latency == 0);
2325 return;
2326 }
2327
2328 if (latency < ABSOLUTE_MIN_LATENCY)
2329 latency = ABSOLUTE_MIN_LATENCY;
2330
2331 if (latency > ABSOLUTE_MAX_LATENCY)
2332 latency = ABSOLUTE_MAX_LATENCY;
2333
2334 if (PA_SOURCE_IS_LINKED(s->state))
2335 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2336 else
2337 s->thread_info.fixed_latency = latency;
2338 }
2339
2340 /* Called from main thread */
2341 pa_usec_t pa_source_get_fixed_latency(pa_source *s) {
2342 pa_usec_t latency;
2343
2344 pa_source_assert_ref(s);
2345 pa_assert_ctl_context();
2346
2347 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY)
2348 return 0;
2349
2350 if (PA_SOURCE_IS_LINKED(s->state))
2351 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2352 else
2353 latency = s->thread_info.fixed_latency;
2354
2355 return latency;
2356 }
2357
2358 /* Called from IO thread */
2359 void pa_source_set_fixed_latency_within_thread(pa_source *s, pa_usec_t latency) {
2360 pa_source_assert_ref(s);
2361 pa_source_assert_io_context(s);
2362
2363 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2364 pa_assert(latency == 0);
2365 return;
2366 }
2367
2368 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2369 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2370
2371 if (s->thread_info.fixed_latency == latency)
2372 return;
2373
2374 s->thread_info.fixed_latency = latency;
2375
2376 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2377 pa_source_output *o;
2378 void *state = NULL;
2379
2380 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2381 if (o->update_source_fixed_latency)
2382 o->update_source_fixed_latency(o);
2383 }
2384
2385 pa_source_invalidate_requested_latency(s, FALSE);
2386 }
2387
2388 /* Called from main thread */
2389 size_t pa_source_get_max_rewind(pa_source *s) {
2390 size_t r;
2391 pa_assert_ctl_context();
2392 pa_source_assert_ref(s);
2393
2394 if (!PA_SOURCE_IS_LINKED(s->state))
2395 return s->thread_info.max_rewind;
2396
2397 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2398
2399 return r;
2400 }
2401
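/* Example (illustrative only; port names are device specific and the one
 * below is just a common ALSA-style name):
 *
 *     if (pa_source_set_port(s, "analog-input-microphone", TRUE) < 0)
 *         pa_log_warn("Failed to switch source port");
 */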
2402 /* Called from main context */
2403 int pa_source_set_port(pa_source *s, const char *name, pa_bool_t save) {
2404 pa_device_port *port;
2405 int ret;
2406
2407 pa_source_assert_ref(s);
2408 pa_assert_ctl_context();
2409
2410 if (!s->set_port) {
2411 pa_log_debug("set_port() operation not implemented for source %u \"%s\"", s->index, s->name);
2412 return -PA_ERR_NOTIMPLEMENTED;
2413 }
2414
2415 if (!s->ports)
2416 return -PA_ERR_NOENTITY;
2417
2418 if (!(port = pa_hashmap_get(s->ports, name)))
2419 return -PA_ERR_NOENTITY;
2420
2421 if (s->active_port == port) {
2422 s->save_port = s->save_port || save;
2423 return 0;
2424 }
2425
2426 if (s->flags & PA_SOURCE_SYNC_VOLUME) {
2427 struct source_message_set_port msg = { .port = port, .ret = 0 };
2428 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
2429 ret = msg.ret;
2430 }
2431 else
2432 ret = s->set_port(s, port);
2433
2434 if (ret < 0)
2435 return -PA_ERR_NOENTITY;
2436
2437 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2438
2439 pa_log_info("Changed port of source %u \"%s\" to %s", s->index, s->name, port->name);
2440
2441 s->active_port = port;
2442 s->save_port = save;
2443
2444 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_CHANGED], s);
2445
2446 return 0;
2447 }
2448
2449 PA_STATIC_FLIST_DECLARE(pa_source_volume_change, 0, pa_xfree);
2450
2451 /* Called from the IO thread. */
2452 static pa_source_volume_change *pa_source_volume_change_new(pa_source *s) {
2453 pa_source_volume_change *c;
2454 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_source_volume_change))))
2455 c = pa_xnew(pa_source_volume_change, 1);
2456
2457 PA_LLIST_INIT(pa_source_volume_change, c);
2458 c->at = 0;
2459 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
2460 return c;
2461 }
2462
2463 /* Called from the IO thread. */
2464 static void pa_source_volume_change_free(pa_source_volume_change *c) {
2465 pa_assert(c);
2466 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_source_volume_change), c) < 0)
2467 pa_xfree(c);
2468 }
2469
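/* Queues a deferred hardware volume write so that it lines up with the
 * stream timing: the target time is now + source latency +
 * volume_change_extra_delay, nudged by the safety margin (slightly later
 * when the volume goes up, slightly earlier when it goes down). */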
2470 /* Called from the IO thread. */
2471 void pa_source_volume_change_push(pa_source *s) {
2472 pa_source_volume_change *c = NULL;
2473 pa_source_volume_change *nc = NULL;
2474 uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
2475
2476 const char *direction = NULL;
2477
2478 pa_assert(s);
2479 nc = pa_source_volume_change_new(s);
2480
2481 /* NOTE: There are already more different volumes in pa_source than I can keep track of.
2482 * Adding one more volume for the HW would let us get rid of this, but I am trying
2483 * to survive with the ones we already have. */
2484 pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
2485
2486 if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
2487 pa_log_debug("Volume not changing");
2488 pa_source_volume_change_free(nc);
2489 return;
2490 }
2491
2492 nc->at = pa_source_get_latency_within_thread(s);
2493 nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
2494
2495 if (s->thread_info.volume_changes_tail) {
2496 for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
2497 /* If volume is going up let's do it a bit late. If it is going
2498 * down let's do it a bit early. */
2499 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
2500 if (nc->at + safety_margin > c->at) {
2501 nc->at += safety_margin;
2502 direction = "up";
2503 break;
2504 }
2505 }
2506 else if (nc->at - safety_margin > c->at) {
2507 nc->at -= safety_margin;
2508 direction = "down";
2509 break;
2510 }
2511 }
2512 }
2513
2514 if (c == NULL) {
2515 if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
2516 nc->at += safety_margin;
2517 direction = "up";
2518 } else {
2519 nc->at -= safety_margin;
2520 direction = "down";
2521 }
2522 PA_LLIST_PREPEND(pa_source_volume_change, s->thread_info.volume_changes, nc);
2523 }
2524 else {
2525 PA_LLIST_INSERT_AFTER(pa_source_volume_change, s->thread_info.volume_changes, c, nc);
2526 }
2527
2528 pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
2529
2530 /* We can ignore volume events that came earlier but should happen later than this. */
2531 PA_LLIST_FOREACH(c, nc->next) {
2532 pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
2533 pa_source_volume_change_free(c);
2534 }
2535 nc->next = NULL;
2536 s->thread_info.volume_changes_tail = nc;
2537 }
2538
2539 /* Called from the IO thread. */
2540 static void pa_source_volume_change_flush(pa_source *s) {
2541 pa_source_volume_change *c = s->thread_info.volume_changes;
2542 pa_assert(s);
2543 s->thread_info.volume_changes = NULL;
2544 s->thread_info.volume_changes_tail = NULL;
2545 while (c) {
2546 pa_source_volume_change *next = c->next;
2547 pa_source_volume_change_free(c);
2548 c = next;
2549 }
2550 }
2551
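/* A rough usage sketch (illustrative only): an implementation using deferred
 * volumes would call this from its IO loop and use usec_to_next to decide
 * when to wake up again:
 *
 *     pa_usec_t usec_to_next;
 *
 *     if (pa_source_volume_change_apply(s, &usec_to_next))
 *         pa_log_debug("Wrote out a deferred HW volume change");
 *
 *     if (usec_to_next > 0)
 *         pa_log_debug("Next deferred volume change due in %llu usec",
 *                      (long long unsigned) usec_to_next);
 */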
2552 /* Called from the IO thread. */
2553 pa_bool_t pa_source_volume_change_apply(pa_source *s, pa_usec_t *usec_to_next) {
2554 pa_usec_t now = pa_rtclock_now();
2555 pa_bool_t ret = FALSE;
2556
2557 pa_assert(s);
2558
2559 if (!PA_SOURCE_IS_LINKED(s->state)) {
2560 if (usec_to_next)
2561 *usec_to_next = 0;
2562 return ret;
2563 }
2564
2565 pa_assert(s->write_volume);
2566
2567 while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
2568 pa_source_volume_change *c = s->thread_info.volume_changes;
2569 PA_LLIST_REMOVE(pa_source_volume_change, s->thread_info.volume_changes, c);
2570 pa_log_debug("Volume change to %d at %llu was written %llu usec late",
2571 pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
2572 ret = TRUE;
2573 s->thread_info.current_hw_volume = c->hw_volume;
2574 pa_source_volume_change_free(c);
2575 }
2576
2577 if (s->write_volume && ret)
2578 s->write_volume(s);
2579
2580 if (s->thread_info.volume_changes) {
2581 if (usec_to_next)
2582 *usec_to_next = s->thread_info.volume_changes->at - now;
2583 if (pa_log_ratelimit(PA_LOG_DEBUG))
2584 pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
2585 }
2586 else {
2587 if (usec_to_next)
2588 *usec_to_next = 0;
2589 s->thread_info.volume_changes_tail = NULL;
2590 }
2591 return ret;
2592 }
2593
2594
2595 /* Called from the main thread */
2596 /* Gets the list of formats supported by the source. Both the idxset and its
2597 * members must be freed by the caller. */
2598 pa_idxset* pa_source_get_formats(pa_source *s) {
2599 pa_idxset *ret;
2600
2601 pa_assert(s);
2602
2603 if (s->get_formats) {
2604 /* Source supports format query, all is good */
2605 ret = s->get_formats(s);
2606 } else {
2607 /* Source doesn't support format query, so assume it does PCM */
2608 pa_format_info *f = pa_format_info_new();
2609 f->encoding = PA_ENCODING_PCM;
2610
2611 ret = pa_idxset_new(NULL, NULL);
2612 pa_idxset_put(ret, f, NULL);
2613 }
2614
2615 return ret;
2616 }
2617
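/* Example (illustrative only): probing whether a source could capture a
 * compressed passthrough format before setting up such a stream:
 *
 *     pa_format_info *f = pa_format_info_new();
 *     f->encoding = PA_ENCODING_AC3_IEC61937;
 *
 *     if (pa_source_check_format(s, f))
 *         pa_log_debug("Source can capture AC3 passthrough");
 *
 *     pa_format_info_free(f);
 */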
2618 /* Called from the main thread */
2619 /* Checks if the source can accept this format */
2620 pa_bool_t pa_source_check_format(pa_source *s, pa_format_info *f) {
2622 pa_idxset *formats = NULL;
2623 pa_bool_t ret = FALSE;
2624
2625 pa_assert(s);
2626 pa_assert(f);
2627
2628 formats = pa_source_get_formats(s);
2629
2630 if (formats) {
2631 pa_format_info *finfo_device;
2632 uint32_t i;
2633
2634 PA_IDXSET_FOREACH(finfo_device, formats, i) {
2635 if (pa_format_info_is_compatible(finfo_device, f)) {
2636 ret = TRUE;
2637 break;
2638 }
2639 }
2640
2641 pa_idxset_free(formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2642 }
2643
2644 return ret;
2645 }
2646
2647 /* Called from the main thread */
2648 /* Calculates the intersection between formats supported by the source and
2649 * in_formats, and returns these, in the order of the source's formats. */
2650 pa_idxset* pa_source_check_formats(pa_source *s, pa_idxset *in_formats) {
2651 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *source_formats = NULL;
2652 pa_format_info *f_source, *f_in;
2653 uint32_t i, j;
2654
2655 pa_assert(s);
2656
2657 if (!in_formats || pa_idxset_isempty(in_formats))
2658 goto done;
2659
2660 source_formats = pa_source_get_formats(s);
2661
2662 PA_IDXSET_FOREACH(f_source, source_formats, i) {
2663 PA_IDXSET_FOREACH(f_in, in_formats, j) {
2664 if (pa_format_info_is_compatible(f_source, f_in))
2665 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
2666 }
2667 }
2668
2669 done:
2670 if (source_formats)
2671 pa_idxset_free(source_formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2672
2673 return out_formats;
2674 }