]> code.delx.au - pulseaudio/blob - src/pulsecore/source.c
hashmap: Use pa_free_cb_t instead of pa_free2_cb_t
[pulseaudio] / src / pulsecore / source.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdio.h>
28 #include <stdlib.h>
29
30 #include <pulse/format.h>
31 #include <pulse/utf8.h>
32 #include <pulse/xmalloc.h>
33 #include <pulse/timeval.h>
34 #include <pulse/util.h>
35 #include <pulse/rtclock.h>
36 #include <pulse/internal.h>
37
38 #include <pulsecore/core-util.h>
39 #include <pulsecore/source-output.h>
40 #include <pulsecore/namereg.h>
41 #include <pulsecore/core-subscribe.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/mix.h>
44 #include <pulsecore/flist.h>
45
46 #include "source.h"
47
48 #define ABSOLUTE_MIN_LATENCY (500)
49 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
50 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
51
52 PA_DEFINE_PUBLIC_CLASS(pa_source, pa_msgobject);
53
/* A single pending hardware volume change, queued for deferred
 * (timer-scheduled) application in the IO thread. Entries are kept in a
 * linked list headed at s->thread_info.volume_changes. */
struct pa_source_volume_change {
    pa_usec_t at;           /* time at which this change should take effect */
    pa_cvolume hw_volume;   /* the hardware volume to apply at that time */

    PA_LLIST_FIELDS(pa_source_volume_change);
};
60
/* Payload for PA_SOURCE_MESSAGE_SET_PORT when the port switch has to be
 * performed synchronously in the IO thread (deferred-volume sources). */
struct source_message_set_port {
    pa_device_port *port;   /* port to activate */
    int ret;                /* result reported back by the message handler */
};
65
66 static void source_free(pa_object *o);
67
68 static void pa_source_volume_change_push(pa_source *s);
69 static void pa_source_volume_change_flush(pa_source *s);
70
71 pa_source_new_data* pa_source_new_data_init(pa_source_new_data *data) {
72 pa_assert(data);
73
74 pa_zero(*data);
75 data->proplist = pa_proplist_new();
76 data->ports = pa_hashmap_new(pa_idxset_string_hash_func, pa_idxset_string_compare_func);
77
78 return data;
79 }
80
81 void pa_source_new_data_set_name(pa_source_new_data *data, const char *name) {
82 pa_assert(data);
83
84 pa_xfree(data->name);
85 data->name = pa_xstrdup(name);
86 }
87
88 void pa_source_new_data_set_sample_spec(pa_source_new_data *data, const pa_sample_spec *spec) {
89 pa_assert(data);
90
91 if ((data->sample_spec_is_set = !!spec))
92 data->sample_spec = *spec;
93 }
94
95 void pa_source_new_data_set_channel_map(pa_source_new_data *data, const pa_channel_map *map) {
96 pa_assert(data);
97
98 if ((data->channel_map_is_set = !!map))
99 data->channel_map = *map;
100 }
101
102 void pa_source_new_data_set_alternate_sample_rate(pa_source_new_data *data, const uint32_t alternate_sample_rate) {
103 pa_assert(data);
104
105 data->alternate_sample_rate_is_set = TRUE;
106 data->alternate_sample_rate = alternate_sample_rate;
107 }
108
109 void pa_source_new_data_set_volume(pa_source_new_data *data, const pa_cvolume *volume) {
110 pa_assert(data);
111
112 if ((data->volume_is_set = !!volume))
113 data->volume = *volume;
114 }
115
116 void pa_source_new_data_set_muted(pa_source_new_data *data, pa_bool_t mute) {
117 pa_assert(data);
118
119 data->muted_is_set = TRUE;
120 data->muted = !!mute;
121 }
122
123 void pa_source_new_data_set_port(pa_source_new_data *data, const char *port) {
124 pa_assert(data);
125
126 pa_xfree(data->active_port);
127 data->active_port = pa_xstrdup(port);
128 }
129
130 void pa_source_new_data_done(pa_source_new_data *data) {
131 pa_assert(data);
132
133 pa_proplist_free(data->proplist);
134
135 if (data->ports)
136 pa_device_port_hashmap_free(data->ports);
137
138 pa_xfree(data->name);
139 pa_xfree(data->active_port);
140 }
141
142 /* Called from main context */
143 static void reset_callbacks(pa_source *s) {
144 pa_assert(s);
145
146 s->set_state = NULL;
147 s->get_volume = NULL;
148 s->set_volume = NULL;
149 s->write_volume = NULL;
150 s->get_mute = NULL;
151 s->set_mute = NULL;
152 s->update_requested_latency = NULL;
153 s->set_port = NULL;
154 s->get_formats = NULL;
155 s->update_rate = NULL;
156 }
157
/* Called from main context */
/* Create a new source from the fully-filled-in 'data'. Registers the name,
 * fires the NEW and FIXATE hooks (either of which may veto creation),
 * validates/fills in defaults for sample spec, channel map, volume and mute,
 * then initializes both the main-thread and thread_info state. Returns NULL
 * on any failure. Note: as the FIXME below says, some validation failure
 * paths leak 's' and leave the name registered. */
pa_source* pa_source_new(
        pa_core *core,
        pa_source_new_data *data,
        pa_source_flags_t flags) {

    pa_source *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_source);

    /* Register the (possibly mangled) name first; the registry may adjust it. */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SOURCE, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_source_new_data_set_name(data, name);

    /* Modules hooked here may modify 'data' or veto the whole creation. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Derive a default channel map from the channel count if none was given. */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    /* FIXME: There should probably be a general function for checking whether
     * the source volume is allowed to be set, like there is for source outputs. */
    pa_assert(!data->volume_is_set || !(flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    if (!data->volume_is_set) {
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);
        data->save_volume = FALSE;
    }

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit properties from the owning card, if any. */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, FALSE);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to adjust 'data' before it is frozen into 's'. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SOURCE_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = source_free;
    s->parent.process_msg = pa_source_process_msg;

    s->core = core;
    s->state = PA_SOURCE_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = data->suspend_cause;
    pa_source_set_mixer_dirty(s, FALSE);
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;
    s->default_sample_rate = s->sample_spec.rate;

    if (data->alternate_sample_rate_is_set)
        s->alternate_sample_rate = data->alternate_sample_rate;
    else
        s->alternate_sample_rate = s->core->alternate_sample_rate;

    /* An alternate rate equal to the default is useless; disable it. */
    if (s->sample_spec.rate == s->alternate_sample_rate) {
        pa_log_warn("Default and alternate sample rates are the same.");
        s->alternate_sample_rate = 0;
    }

    s->outputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;
    s->monitor_of = NULL;
    s->output_from_master = NULL;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    /* Use the explicitly requested port if it exists... */
    if (data->active_port)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* ...otherwise fall back to the highest-priority port. */
    if (!s->active_port) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    if (s->active_port)
        s->latency_offset = s->active_port->latency_offset;
    else
        s->latency_offset = 0;

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* Mirror main-thread state into the thread_info copy used by the IO thread. */
    s->thread_info.rtpoll = NULL;
    s->thread_info.outputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.max_rewind = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SOURCE_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    PA_LLIST_HEAD_INIT(pa_source_volume_change, s->thread_info.volume_changes);
    s->thread_info.volume_changes_tail = NULL;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
    s->thread_info.volume_change_safety_margin = core->deferred_volume_safety_margin_usec;
    s->thread_info.volume_change_extra_delay = core->deferred_volume_extra_delay_usec;
    s->thread_info.latency_offset = s->latency_offset;

    /* FIXME: This should probably be moved to pa_source_put() */
    pa_assert_se(pa_idxset_put(core->sources, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sources, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
    pa_log_info("Created source %u \"%s\" with sample spec %s and channel map %s\n    %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    return s;
}
351
/* Called from main context */
/* Transition the source to 'state'. Notifies the implementor via
 * s->set_state(), then the IO thread via an asyncmsg; if the IO thread
 * rejects the change, the implementor is rolled back to the original state.
 * On success fires the STATE_CHANGED hook / subscription event (except when
 * entering UNLINKED, which is announced by pa_source_unlink()) and informs
 * all outputs about suspend/resume transitions. Returns 0 on success or a
 * negative error code. */
static int source_set_state(pa_source *s, pa_source_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_source_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* TRUE iff this transition crosses the suspended/opened boundary in
     * either direction. */
    suspend_change =
        (original_state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(state)) ||
        (PA_SOURCE_IS_OPENED(original_state) && state == PA_SOURCE_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* IO thread refused; undo the implementor-side change. */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SOURCE_UNLINKED) { /* if we enter UNLINKED state pa_source_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_source_output *o;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(o, s->outputs, idx)
            if (s->state == PA_SOURCE_SUSPENDED &&
                (o->flags & PA_SOURCE_OUTPUT_KILL_ON_SUSPEND))
                pa_source_output_kill(o);
            else if (o->suspend)
                o->suspend(o, state == PA_SOURCE_SUSPENDED);
    }

    return 0;
}
406
407 void pa_source_set_get_volume_callback(pa_source *s, pa_source_cb_t cb) {
408 pa_assert(s);
409
410 s->get_volume = cb;
411 }
412
413 void pa_source_set_set_volume_callback(pa_source *s, pa_source_cb_t cb) {
414 pa_source_flags_t flags;
415
416 pa_assert(s);
417 pa_assert(!s->write_volume || cb);
418
419 s->set_volume = cb;
420
421 /* Save the current flags so we can tell if they've changed */
422 flags = s->flags;
423
424 if (cb) {
425 /* The source implementor is responsible for setting decibel volume support */
426 s->flags |= PA_SOURCE_HW_VOLUME_CTRL;
427 } else {
428 s->flags &= ~PA_SOURCE_HW_VOLUME_CTRL;
429 /* See note below in pa_source_put() about volume sharing and decibel volumes */
430 pa_source_enable_decibel_volume(s, !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
431 }
432
433 /* If the flags have changed after init, let any clients know via a change event */
434 if (s->state != PA_SOURCE_INIT && flags != s->flags)
435 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
436 }
437
438 void pa_source_set_write_volume_callback(pa_source *s, pa_source_cb_t cb) {
439 pa_source_flags_t flags;
440
441 pa_assert(s);
442 pa_assert(!cb || s->set_volume);
443
444 s->write_volume = cb;
445
446 /* Save the current flags so we can tell if they've changed */
447 flags = s->flags;
448
449 if (cb)
450 s->flags |= PA_SOURCE_DEFERRED_VOLUME;
451 else
452 s->flags &= ~PA_SOURCE_DEFERRED_VOLUME;
453
454 /* If the flags have changed after init, let any clients know via a change event */
455 if (s->state != PA_SOURCE_INIT && flags != s->flags)
456 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
457 }
458
459 void pa_source_set_get_mute_callback(pa_source *s, pa_source_cb_t cb) {
460 pa_assert(s);
461
462 s->get_mute = cb;
463 }
464
465 void pa_source_set_set_mute_callback(pa_source *s, pa_source_cb_t cb) {
466 pa_source_flags_t flags;
467
468 pa_assert(s);
469
470 s->set_mute = cb;
471
472 /* Save the current flags so we can tell if they've changed */
473 flags = s->flags;
474
475 if (cb)
476 s->flags |= PA_SOURCE_HW_MUTE_CTRL;
477 else
478 s->flags &= ~PA_SOURCE_HW_MUTE_CTRL;
479
480 /* If the flags have changed after init, let any clients know via a change event */
481 if (s->state != PA_SOURCE_INIT && flags != s->flags)
482 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
483 }
484
485 static void enable_flat_volume(pa_source *s, pa_bool_t enable) {
486 pa_source_flags_t flags;
487
488 pa_assert(s);
489
490 /* Always follow the overall user preference here */
491 enable = enable && s->core->flat_volumes;
492
493 /* Save the current flags so we can tell if they've changed */
494 flags = s->flags;
495
496 if (enable)
497 s->flags |= PA_SOURCE_FLAT_VOLUME;
498 else
499 s->flags &= ~PA_SOURCE_FLAT_VOLUME;
500
501 /* If the flags have changed after init, let any clients know via a change event */
502 if (s->state != PA_SOURCE_INIT && flags != s->flags)
503 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
504 }
505
506 void pa_source_enable_decibel_volume(pa_source *s, pa_bool_t enable) {
507 pa_source_flags_t flags;
508
509 pa_assert(s);
510
511 /* Save the current flags so we can tell if they've changed */
512 flags = s->flags;
513
514 if (enable) {
515 s->flags |= PA_SOURCE_DECIBEL_VOLUME;
516 enable_flat_volume(s, TRUE);
517 } else {
518 s->flags &= ~PA_SOURCE_DECIBEL_VOLUME;
519 enable_flat_volume(s, FALSE);
520 }
521
522 /* If the flags have changed after init, let any clients know via a change event */
523 if (s->state != PA_SOURCE_INIT && flags != s->flags)
524 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
525 }
526
/* Called from main context */
/* Finish initialization of a source created with pa_source_new() and make
 * it live: verify the flag/callback invariants, settle decibel/flat volume
 * support, inherit volumes from the master for volume-sharing filter
 * sources, move to SUSPENDED or IDLE, and announce the new source via a
 * subscription event and the SOURCE_PUT hook. */
void pa_source_put(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SOURCE_INIT);
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || s->output_from_master);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_source_new(). As a
     * special exception we allow some volume related flags to be set
     * between _new() and _put() by the callback setter functions above.
     *
     * Thus we implement a couple safeguards here which ensure the above
     * setters were used (or at least the implementor made manual changes
     * in a compatible way).
     *
     * Note: All of these flags set here can change over the life time
     * of the source. */
    pa_assert(!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) || s->set_volume);
    pa_assert(!(s->flags & PA_SOURCE_DEFERRED_VOLUME) || s->write_volume);
    pa_assert(!(s->flags & PA_SOURCE_HW_MUTE_CTRL) || s->set_mute);

    /* XXX: Currently decibel volume is disabled for all sources that use volume
     * sharing. When the master source supports decibel volume, it would be good
     * to have the flag also in the filter source, but currently we don't do that
     * so that the flags of the filter source never change when it's moved from
     * a master source to another. One solution for this problem would be to
     * remove user-visible volume altogether from filter sources when volume
     * sharing is used, but the current approach was easier to implement... */
    /* We always support decibel volumes in software, otherwise we leave it to
     * the source implementor to set this flag as needed.
     *
     * Note: This flag can also change over the life time of the source. */
    if (!(s->flags & PA_SOURCE_HW_VOLUME_CTRL) && !(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        pa_source_enable_decibel_volume(s, TRUE);

    /* If the source implementor support DB volumes by itself, we should always
     * try and enable flat volumes too */
    if ((s->flags & PA_SOURCE_DECIBEL_VOLUME))
        enable_flat_volume(s, TRUE);

    if (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) {
        pa_source *root_source = pa_source_get_master(s);

        pa_assert(PA_LIKELY(root_source));

        /* Filter sources mirror the master's volumes, remapped to their own
         * channel map. */
        s->reference_volume = root_source->reference_volume;
        pa_cvolume_remap(&s->reference_volume, &root_source->channel_map, &s->channel_map);

        s->real_volume = root_source->real_volume;
        pa_cvolume_remap(&s->real_volume, &root_source->channel_map, &s->channel_map);
    } else
        /* We assume that if the sink implementor changed the default
         * volume he did so in real_volume, because that is the usual
         * place where he is supposed to place his changes.  */
        s->reference_volume = s->real_volume;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);

    pa_assert((s->flags & PA_SOURCE_HW_VOLUME_CTRL)
              || (s->base_volume == PA_VOLUME_NORM
                  && ((s->flags & PA_SOURCE_DECIBEL_VOLUME || (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)))));
    pa_assert(!(s->flags & PA_SOURCE_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SOURCE_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));

    /* A source created with a pending suspend cause starts out suspended. */
    if (s->suspend_cause)
        pa_assert_se(source_set_state(s, PA_SOURCE_SUSPENDED) == 0);
    else
        pa_assert_se(source_set_state(s, PA_SOURCE_IDLE) == 0);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PUT], s);
}
606
/* Called from main context */
/* Disconnect the source from the core: fire the UNLINK hook, unregister the
 * name, drop it from the core and card idxsets, kill all remaining outputs,
 * move to the UNLINKED state, clear the implementor callbacks and finally
 * announce the removal. Safe to call more than once (subsequent calls find
 * the source already unlinked). */
void pa_source_unlink(pa_source *s) {
    pa_bool_t linked;
    pa_source_output *o, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* See pa_sink_unlink() for a couple of comments how this function
     * works. */

    linked = PA_SOURCE_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], s);

    if (s->state != PA_SOURCE_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sources, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sources, s, NULL);

    /* Kill every remaining output; 'j' guards against an output that
     * refuses to go away, which would otherwise loop forever. */
    while ((o = pa_idxset_first(s->outputs, NULL))) {
        pa_assert(o != j);
        pa_source_output_kill(o);
        j = o;
    }

    if (linked)
        source_set_state(s, PA_SOURCE_UNLINKED);
    else
        s->state = PA_SOURCE_UNLINKED;

    reset_callbacks(s);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK_POST], s);
    }
}
648
/* Called from main context */
/* Destructor, invoked via s->parent.parent.free when the reference count
 * drops to zero. Unlinks the source first if that has not happened yet,
 * then releases all owned resources. */
static void source_free(pa_object *o) {
    pa_source *s = PA_SOURCE(o);

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_source_refcnt(s) == 0);

    if (PA_SOURCE_IS_LINKED(s->state))
        pa_source_unlink(s);

    pa_log_info("Freeing source %u \"%s\"", s->index, s->name);

    /* The main-thread output set holds no references; the thread_info copy
     * does, hence the unref callback on the hashmap. */
    pa_idxset_free(s->outputs, NULL, NULL);
    pa_hashmap_free(s->thread_info.outputs, (pa_free_cb_t) pa_source_output_unref);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports)
        pa_device_port_hashmap_free(s->ports);

    pa_xfree(s);
}
679
680 /* Called from main context, and not while the IO thread is active, please */
681 void pa_source_set_asyncmsgq(pa_source *s, pa_asyncmsgq *q) {
682 pa_source_assert_ref(s);
683 pa_assert_ctl_context();
684
685 s->asyncmsgq = q;
686 }
687
688 /* Called from main context, and not while the IO thread is active, please */
689 void pa_source_update_flags(pa_source *s, pa_source_flags_t mask, pa_source_flags_t value) {
690 pa_source_assert_ref(s);
691 pa_assert_ctl_context();
692
693 if (mask == 0)
694 return;
695
696 /* For now, allow only a minimal set of flags to be changed. */
697 pa_assert((mask & ~(PA_SOURCE_DYNAMIC_LATENCY|PA_SOURCE_LATENCY)) == 0);
698
699 s->flags = (s->flags & ~mask) | (value & mask);
700 }
701
702 /* Called from IO context, or before _put() from main context */
703 void pa_source_set_rtpoll(pa_source *s, pa_rtpoll *p) {
704 pa_source_assert_ref(s);
705 pa_source_assert_io_context(s);
706
707 s->thread_info.rtpoll = p;
708 }
709
710 /* Called from main context */
711 int pa_source_update_status(pa_source*s) {
712 pa_source_assert_ref(s);
713 pa_assert_ctl_context();
714 pa_assert(PA_SOURCE_IS_LINKED(s->state));
715
716 if (s->state == PA_SOURCE_SUSPENDED)
717 return 0;
718
719 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
720 }
721
722 /* Called from any context - must be threadsafe */
723 void pa_source_set_mixer_dirty(pa_source *s, pa_bool_t is_dirty)
724 {
725 pa_atomic_store(&s->mixer_dirty, is_dirty ? 1 : 0);
726 }
727
/* Called from main context */
/* Add or remove 'cause' from the source's suspend-cause bitmask and move
 * the source to SUSPENDED (any cause set) or RUNNING/IDLE (no cause left).
 * Monitor sources only accept PA_SUSPEND_PASSTHROUGH. When resuming from a
 * session suspend with a dirty mixer, the hardware mixer state is re-synced
 * first. Returns 0 or a negative error code. */
int pa_source_suspend(pa_source *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (s->monitor_of && cause != PA_SUSPEND_PASSTHROUGH)
        return -PA_ERR_NOTSUPPORTED;

    if (suspend)
        s->suspend_cause |= cause;
    else
        s->suspend_cause &= ~cause;

    if (!(s->suspend_cause & PA_SUSPEND_SESSION) && (pa_atomic_load(&s->mixer_dirty) != 0)) {
        /* This might look racy but isn't: If somebody sets mixer_dirty exactly here,
           it'll be handled just fine. */
        pa_source_set_mixer_dirty(s, FALSE);
        pa_log_debug("Mixer is now accessible. Updating alsa mixer settings.");
        if (s->active_port && s->set_port) {
            /* Deferred-volume sources must switch ports from the IO thread. */
            if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
                struct source_message_set_port msg = { .port = s->active_port, .ret = 0 };
                pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
            }
            else
                s->set_port(s, s->active_port);
        }
        else {
            /* No port to restore; just push mute and volume to the hardware. */
            if (s->set_mute)
                s->set_mute(s);
            if (s->set_volume)
                s->set_volume(s);
        }
    }

    /* Nothing to do if the actual state already matches the cause mask. */
    if ((pa_source_get_state(s) == PA_SOURCE_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of source %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return source_set_state(s, PA_SOURCE_SUSPENDED);
    else
        return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
}
774
775 /* Called from main context */
776 int pa_source_sync_suspend(pa_source *s) {
777 pa_sink_state_t state;
778
779 pa_source_assert_ref(s);
780 pa_assert_ctl_context();
781 pa_assert(PA_SOURCE_IS_LINKED(s->state));
782 pa_assert(s->monitor_of);
783
784 state = pa_sink_get_state(s->monitor_of);
785
786 if (state == PA_SINK_SUSPENDED)
787 return source_set_state(s, PA_SOURCE_SUSPENDED);
788
789 pa_assert(PA_SINK_IS_OPENED(state));
790
791 return source_set_state(s, pa_source_used_by(s) ? PA_SOURCE_RUNNING : PA_SOURCE_IDLE);
792 }
793
794 /* Called from main context */
795 pa_queue *pa_source_move_all_start(pa_source *s, pa_queue *q) {
796 pa_source_output *o, *n;
797 uint32_t idx;
798
799 pa_source_assert_ref(s);
800 pa_assert_ctl_context();
801 pa_assert(PA_SOURCE_IS_LINKED(s->state));
802
803 if (!q)
804 q = pa_queue_new();
805
806 for (o = PA_SOURCE_OUTPUT(pa_idxset_first(s->outputs, &idx)); o; o = n) {
807 n = PA_SOURCE_OUTPUT(pa_idxset_next(s->outputs, &idx));
808
809 pa_source_output_ref(o);
810
811 if (pa_source_output_start_move(o) >= 0)
812 pa_queue_push(q, o);
813 else
814 pa_source_output_unref(o);
815 }
816
817 return q;
818 }
819
820 /* Called from main context */
821 void pa_source_move_all_finish(pa_source *s, pa_queue *q, pa_bool_t save) {
822 pa_source_output *o;
823
824 pa_source_assert_ref(s);
825 pa_assert_ctl_context();
826 pa_assert(PA_SOURCE_IS_LINKED(s->state));
827 pa_assert(q);
828
829 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
830 if (pa_source_output_finish_move(o, s, save) < 0)
831 pa_source_output_fail_move(o);
832
833 pa_source_output_unref(o);
834 }
835
836 pa_queue_free(q, NULL);
837 }
838
839 /* Called from main context */
840 void pa_source_move_all_fail(pa_queue *q) {
841 pa_source_output *o;
842
843 pa_assert_ctl_context();
844 pa_assert(q);
845
846 while ((o = PA_SOURCE_OUTPUT(pa_queue_pop(q)))) {
847 pa_source_output_fail_move(o);
848 pa_source_output_unref(o);
849 }
850
851 pa_queue_free(q, NULL);
852 }
853
854 /* Called from IO thread context */
855 void pa_source_process_rewind(pa_source *s, size_t nbytes) {
856 pa_source_output *o;
857 void *state = NULL;
858
859 pa_source_assert_ref(s);
860 pa_source_assert_io_context(s);
861 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
862
863 if (nbytes <= 0)
864 return;
865
866 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
867 return;
868
869 pa_log_debug("Processing rewind...");
870
871 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
872 pa_source_output_assert_ref(o);
873 pa_source_output_process_rewind(o, nbytes);
874 }
875 }
876
877 /* Called from IO thread context */
878 void pa_source_post(pa_source*s, const pa_memchunk *chunk) {
879 pa_source_output *o;
880 void *state = NULL;
881
882 pa_source_assert_ref(s);
883 pa_source_assert_io_context(s);
884 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
885 pa_assert(chunk);
886
887 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
888 return;
889
890 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
891 pa_memchunk vchunk = *chunk;
892
893 pa_memblock_ref(vchunk.memblock);
894 pa_memchunk_make_writable(&vchunk, 0);
895
896 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
897 pa_silence_memchunk(&vchunk, &s->sample_spec);
898 else
899 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
900
901 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
902 pa_source_output_assert_ref(o);
903
904 if (!o->thread_info.direct_on_input)
905 pa_source_output_push(o, &vchunk);
906 }
907
908 pa_memblock_unref(vchunk.memblock);
909 } else {
910
911 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL))) {
912 pa_source_output_assert_ref(o);
913
914 if (!o->thread_info.direct_on_input)
915 pa_source_output_push(o, chunk);
916 }
917 }
918 }
919
920 /* Called from IO thread context */
921 void pa_source_post_direct(pa_source*s, pa_source_output *o, const pa_memchunk *chunk) {
922 pa_source_assert_ref(s);
923 pa_source_assert_io_context(s);
924 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
925 pa_source_output_assert_ref(o);
926 pa_assert(o->thread_info.direct_on_input);
927 pa_assert(chunk);
928
929 if (s->thread_info.state == PA_SOURCE_SUSPENDED)
930 return;
931
932 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&s->thread_info.soft_volume)) {
933 pa_memchunk vchunk = *chunk;
934
935 pa_memblock_ref(vchunk.memblock);
936 pa_memchunk_make_writable(&vchunk, 0);
937
938 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&s->thread_info.soft_volume))
939 pa_silence_memchunk(&vchunk, &s->sample_spec);
940 else
941 pa_volume_memchunk(&vchunk, &s->sample_spec, &s->thread_info.soft_volume);
942
943 pa_source_output_push(o, &vchunk);
944
945 pa_memblock_unref(vchunk.memblock);
946 } else
947 pa_source_output_push(o, chunk);
948 }
949
/* Called from main thread */
/* Try to switch the source's sample rate to 'rate' (or, for non-passthrough
 * use, to whichever of default/alternate rate belongs to the same rate
 * family as the request). The source is suspended around the change and all
 * corked outputs get their resamplers updated afterwards. Returns TRUE if
 * the rate was changed, FALSE otherwise (unsupported, busy, running, or
 * out-of-range rate). */
pa_bool_t pa_source_update_rate(pa_source *s, uint32_t rate, pa_bool_t passthrough)
{
    if (s->update_rate) {
        uint32_t desired_rate = rate;
        uint32_t default_rate = s->default_sample_rate;
        uint32_t alternate_rate = s->alternate_sample_rate;
        uint32_t idx;
        pa_source_output *o;
        pa_bool_t use_alternate = FALSE;

        if (PA_UNLIKELY(default_rate == alternate_rate)) {
            pa_log_warn("Default and alternate sample rates are the same.");
            return FALSE;
        }

        if (PA_SOURCE_IS_RUNNING(s->state)) {
            pa_log_info("Cannot update rate, SOURCE_IS_RUNNING, will keep using %u Hz",
                        s->sample_spec.rate);
            return FALSE;
        }

        /* NOTE(review): 8000 Hz appears to be a hard-coded lower bound here —
         * rates below it are rejected outright. */
        if (PA_UNLIKELY (desired_rate < 8000 ||
                         desired_rate > PA_RATE_MAX))
            return FALSE;

        if (!passthrough) {
            /* The two configured rates must belong to different rate families
             * (multiples of 4000 vs. multiples of 11025). */
            pa_assert(default_rate % 4000 || default_rate % 11025);
            pa_assert(alternate_rate % 4000 || alternate_rate % 11025);

            if (default_rate % 4000) {
                /* default is a 11025 multiple */
                if ((alternate_rate % 4000 == 0) && (desired_rate % 4000 == 0))
                    use_alternate=TRUE;
            } else {
                /* default is 4000 multiple */
                if ((alternate_rate % 11025 == 0) && (desired_rate % 11025 == 0))
                    use_alternate=TRUE;
            }

            if (use_alternate)
                desired_rate = alternate_rate;
            else
                desired_rate = default_rate;
        } else {
            desired_rate = rate; /* use stream sampling rate, discard default/alternate settings */
        }

        if (desired_rate == s->sample_spec.rate)
            return FALSE;

        /* Non-passthrough rate switches are only allowed while nobody is
         * actively using the source. */
        if (!passthrough && pa_source_used_by(s) > 0)
            return FALSE;

        pa_log_debug("Suspending source %s due to changing the sample rate.", s->name);
        pa_source_suspend(s, TRUE, PA_SUSPEND_IDLE); /* needed before rate update, will be resumed automatically */

        if (s->update_rate(s, desired_rate) == TRUE) {
            pa_log_info("Changed sampling rate successfully ");

            PA_IDXSET_FOREACH(o, s->outputs, idx) {
                if (o->state == PA_SOURCE_OUTPUT_CORKED)
                    pa_source_output_update_rate(o);
            }
            return TRUE;
        }
    }
    return FALSE;
}
1019
1020 /* Called from main thread */
1021 pa_usec_t pa_source_get_latency(pa_source *s) {
1022 pa_usec_t usec;
1023
1024 pa_source_assert_ref(s);
1025 pa_assert_ctl_context();
1026 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1027
1028 if (s->state == PA_SOURCE_SUSPENDED)
1029 return 0;
1030
1031 if (!(s->flags & PA_SOURCE_LATENCY))
1032 return 0;
1033
1034 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1035
1036 /* usec is unsigned, so check that the offset can be added to usec without
1037 * underflowing. */
1038 if (-s->latency_offset <= (int64_t) usec)
1039 usec += s->latency_offset;
1040 else
1041 usec = 0;
1042
1043 return usec;
1044 }
1045
/* Called from IO thread */
/* Returns the source's current capture latency, in the time domain of the
 * sound card, adjusted by thread_info.latency_offset. Returns 0 when the
 * source is suspended or does not report latency.
 * NOTE(review): on process_msg() failure this returns -1, which wraps to the
 * maximum value of the unsigned pa_usec_t return type — presumably callers
 * treat (pa_usec_t) -1 as an error marker; confirm before changing. */
pa_usec_t pa_source_get_latency_within_thread(pa_source *s) {
    pa_usec_t usec = 0;
    pa_msgobject *o;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->thread_info.state == PA_SOURCE_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SOURCE_LATENCY))
        return 0;

    o = PA_MSGOBJECT(s);

    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */

    if (o->process_msg(o, PA_SOURCE_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
        return -1;

    /* usec is unsigned, so check that the offset can be added to usec without
     * underflowing. */
    if (-s->thread_info.latency_offset <= (int64_t) usec)
        usec += s->thread_info.latency_offset;
    else
        usec = 0;

    return usec;
}
1079
1080 /* Called from the main thread (and also from the IO thread while the main
1081 * thread is waiting).
1082 *
1083 * When a source uses volume sharing, it never has the PA_SOURCE_FLAT_VOLUME flag
1084 * set. Instead, flat volume mode is detected by checking whether the root source
1085 * has the flag set. */
1086 pa_bool_t pa_source_flat_volume_enabled(pa_source *s) {
1087 pa_source_assert_ref(s);
1088
1089 s = pa_source_get_master(s);
1090
1091 if (PA_LIKELY(s))
1092 return (s->flags & PA_SOURCE_FLAT_VOLUME);
1093 else
1094 return FALSE;
1095 }
1096
1097 /* Called from the main thread (and also from the IO thread while the main
1098 * thread is waiting). */
1099 pa_source *pa_source_get_master(pa_source *s) {
1100 pa_source_assert_ref(s);
1101
1102 while (s && (s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
1103 if (PA_UNLIKELY(!s->output_from_master))
1104 return NULL;
1105
1106 s = s->output_from_master->source;
1107 }
1108
1109 return s;
1110 }
1111
1112 /* Called from main context */
1113 pa_bool_t pa_source_is_passthrough(pa_source *s) {
1114
1115 pa_source_assert_ref(s);
1116
1117 /* NB Currently only monitor sources support passthrough mode */
1118 return (s->monitor_of && pa_sink_is_passthrough(s->monitor_of));
1119 }
1120
1121 /* Called from main context */
1122 void pa_source_enter_passthrough(pa_source *s) {
1123 pa_cvolume volume;
1124
1125 /* set the volume to NORM */
1126 s->saved_volume = *pa_source_get_volume(s, TRUE);
1127 s->saved_save_volume = s->save_volume;
1128
1129 pa_cvolume_set(&volume, s->sample_spec.channels, PA_MIN(s->base_volume, PA_VOLUME_NORM));
1130 pa_source_set_volume(s, &volume, TRUE, FALSE);
1131 }
1132
/* Called from main context */
/* Undo pa_source_enter_passthrough(): restore the volume (and its save flag)
 * stashed in s->saved_volume / s->saved_save_volume, then clear the stash. */
void pa_source_leave_passthrough(pa_source *s) {
    /* Restore source volume to what it was before we entered passthrough mode */
    pa_source_set_volume(s, &s->saved_volume, TRUE, s->saved_save_volume);

    pa_cvolume_init(&s->saved_volume);
    s->saved_save_volume = FALSE;
}
1141
/* Called from main context. */
/* Recompute o->reference_ratio = o->volume / o->source->reference_volume,
 * with the source's reference volume first remapped into the output's channel
 * map. Muted source channels are skipped, and a channel's ratio is only
 * rewritten when multiplying it back does not already reproduce o->volume —
 * this avoids accumulating rounding error over repeated divide/multiply
 * round trips. Requires flat volume mode on the output's source. */
static void compute_reference_ratio(pa_source_output *o) {
    unsigned c = 0;
    pa_cvolume remapped;

    pa_assert(o);
    pa_assert(pa_source_flat_volume_enabled(o->source));

    /*
     * Calculates the reference ratio from the source's reference
     * volume. This basically calculates:
     *
     * o->reference_ratio = o->volume / o->source->reference_volume
     */

    remapped = o->source->reference_volume;
    pa_cvolume_remap(&remapped, &o->source->channel_map, &o->channel_map);

    o->reference_ratio.channels = o->sample_spec.channels;

    for (c = 0; c < o->sample_spec.channels; c++) {

        /* We don't update when the source volume is 0 anyway */
        if (remapped.values[c] <= PA_VOLUME_MUTED)
            continue;

        /* Don't update the reference ratio unless necessary */
        if (pa_sw_volume_multiply(
                    o->reference_ratio.values[c],
                    remapped.values[c]) == o->volume.values[c])
            continue;

        o->reference_ratio.values[c] = pa_sw_volume_divide(
                o->volume.values[c],
                remapped.values[c]);
    }
}
1179
/* Called from main context. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* Recompute the reference ratio of every output attached to this source, and
 * recurse into any volume-sharing filter sources fed by those outputs so
 * their outputs are updated as well. */
static void compute_reference_ratios(pa_source *s) {
    uint32_t idx;
    pa_source_output *o;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        compute_reference_ratio(o);

        /* Descend into filter sources that share our volume. */
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
            compute_reference_ratios(o->destination_source);
    }
}
1198
/* Called from main context. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* For every output of this source, recompute:
 *
 *   o->real_ratio  := o->volume / s->real_volume
 *   o->soft_volume := o->real_ratio * o->volume_factor
 *
 * Outputs feeding a volume-sharing filter source are special-cased: their
 * real ratio is forced to 0 dB and the recursion continues into the filter
 * source. The thread_info copy of soft_volume is deliberately NOT updated
 * here — that is the caller's responsibility. */
static void compute_real_ratios(pa_source *s) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            /* The origin source uses volume sharing, so this input's real ratio
             * is handled as a special case - the real ratio must be 0 dB, and
             * as a result i->soft_volume must equal i->volume_factor. */
            pa_cvolume_reset(&o->real_ratio, o->real_ratio.channels);
            o->soft_volume = o->volume_factor;

            compute_real_ratios(o->destination_source);

            continue;
        }

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &o->channel_map);

        o->real_ratio.channels = o->sample_spec.channels;
        o->soft_volume.channels = o->sample_spec.channels;

        for (c = 0; c < o->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave o->real_ratio untouched */
                o->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                        o->real_ratio.values[c],
                        remapped.values[c]) != o->volume.values[c])

                o->real_ratio.values[c] = pa_sw_volume_divide(
                        o->volume.values[c],
                        remapped.values[c]);

            o->soft_volume.values[c] = pa_sw_volume_multiply(
                    o->real_ratio.values[c],
                    o->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
1265
/* Remap the volume *v from channel map 'from' into channel map 'to', choosing
 * the remapping with the least impact on other streams (see the strategy
 * comment inside). 'template' is the source-side volume that a remapping of v
 * may be checked against. Returns v, modified in place. */
static pa_cvolume *cvolume_remap_minimal_impact(
        pa_cvolume *v,
        const pa_cvolume *template,
        const pa_channel_map *from,
        const pa_channel_map *to) {

    pa_cvolume t;

    pa_assert(v);
    pa_assert(template);
    pa_assert(from);
    pa_assert(to);
    pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
    pa_assert(pa_cvolume_compatible_with_channel_map(template, to));

    /* Much like pa_cvolume_remap(), but tries to minimize impact when
     * mapping from source output to source volumes:
     *
     * If template is a possible remapping from v it is used instead
     * of remapping anew.
     *
     * If the channel maps don't match we set an all-channel volume on
     * the source to ensure that changing a volume on one stream has no
     * effect that cannot be compensated for in another stream that
     * does not have the same channel map as the source. */

    if (pa_channel_map_equal(from, to))
        return v;

    t = *template;
    if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
        *v = *template;
        return v;
    }

    /* Fall back to a flat all-channel volume at the per-channel maximum. */
    pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
    return v;
}
1304
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* Merge the per-channel maximum of all output volumes (remapped into
 * 'channel_map' with minimal impact) into *max_volume. Outputs that feed a
 * volume-sharing filter source are themselves skipped — their volume will be
 * forced to the root's real volume later — but the recursion descends into
 * the filter source's own outputs. */
static void get_maximum_output_volume(pa_source *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(max_volume);
    pa_assert(channel_map);
    pa_assert(pa_source_flat_volume_enabled(s));

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume remapped;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            get_maximum_output_volume(o->destination_source, max_volume, channel_map);

            /* Ignore this output. The origin source uses volume sharing, so this
             * output's volume will be set to be equal to the root source's real
             * volume. Obviously this output's current volume must not then
             * affect what the root source's real volume will be. */
            continue;
        }

        remapped = o->volume;
        cvolume_remap_minimal_impact(&remapped, max_volume, &o->channel_map, channel_map);
        pa_cvolume_merge(max_volume, max_volume, &remapped);
    }
}
1334
1335 /* Called from main thread. Only called for the root source in volume sharing
1336 * cases, except for internal recursive calls. */
1337 static pa_bool_t has_outputs(pa_source *s) {
1338 pa_source_output *o;
1339 uint32_t idx;
1340
1341 pa_source_assert_ref(s);
1342
1343 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1344 if (!o->destination_source || !(o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER) || has_outputs(o->destination_source))
1345 return TRUE;
1346 }
1347
1348 return FALSE;
1349 }
1350
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* Store *new_volume (given in 'channel_map') as this source's real volume,
 * remapped into the source's own channel map, and propagate it down to all
 * volume-sharing filter sources. In flat volume mode each sharing output's
 * user-visible volume is made to follow the root's real volume, its reference
 * ratio is recomputed, and subscribers are notified of any change. */
static void update_real_volume(pa_source *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(new_volume);
    pa_assert(channel_map);

    s->real_volume = *new_volume;
    pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            if (pa_source_flat_volume_enabled(s)) {
                pa_cvolume old_volume = o->volume;

                /* Follow the root source's real volume. */
                o->volume = *new_volume;
                pa_cvolume_remap(&o->volume, channel_map, &o->channel_map);
                compute_reference_ratio(o);

                /* The volume changed, let's tell people so */
                if (!pa_cvolume_equal(&old_volume, &o->volume)) {
                    if (o->volume_changed)
                        o->volume_changed(o);

                    pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
                }
            }

            update_real_volume(o->destination_source, new_volume, channel_map);
        }
    }
}
1387
/* Called from main thread. Only called for the root source in shared volume
 * cases. */
/* Derive the root source's real volume in flat volume mode: the per-channel
 * maximum of all attached stream volumes. With no outputs at all the
 * reference volume is kept as-is. Afterwards the real ratios / soft volumes
 * of all outputs are recomputed to match. */
static void compute_real_volume(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (!has_outputs(s)) {
        /* In the special case that we have no source outputs we leave the
         * volume unmodified. */
        update_real_volume(s, &s->reference_volume, &s->channel_map);
        return;
    }

    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all outputs
     * connected to this source */
    get_maximum_output_volume(s, &s->real_volume, &s->channel_map);
    update_real_volume(s, &s->real_volume, &s->channel_map);

    /* Then, let's update the real ratios/soft volumes of all outputs
     * connected to this source */
    compute_real_ratios(s);
}
1418
/* Called from main thread. Only called for the root source in shared volume
 * cases, except for internal recursive calls. */
/* After the source's reference volume changed (for a reason other than a
 * stream volume change), rebuild every output's volume as
 * reference_volume * reference_ratio and notify subscribers of any change.
 * Outputs feeding volume-sharing filter sources are recursed into instead;
 * their own volume is fixed up later against the root's real volume. */
static void propagate_reference_volume(pa_source *s) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(pa_source_flat_volume_enabled(s));

    /* This is called whenever the source volume changes that is not
     * caused by a source output volume change. We need to fix up the
     * source output volumes accordingly */

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        pa_cvolume old_volume;

        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
            propagate_reference_volume(o->destination_source);

            /* Since the origin source uses volume sharing, this output's volume
             * needs to be updated to match the root source's real volume, but
             * that will be done later in update_shared_real_volume(). */
            continue;
        }

        old_volume = o->volume;

        /* This basically calculates:
         *
         * o->volume := o->reference_volume * o->reference_ratio */

        o->volume = s->reference_volume;
        pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
        pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);

        /* The volume changed, let's tell people so */
        if (!pa_cvolume_equal(&old_volume, &o->volume)) {

            if (o->volume_changed)
                o->volume_changed(o);

            pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
        }
    }
}
1466
/* Called from main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. The return value indicates
 * whether any reference volume actually changed. */
/* Store *v (given in 'channel_map', remapped into the source's own map) as
 * the reference volume, post a change event if it actually changed, and
 * propagate the new value to all volume-sharing filter sources. 'save'
 * requests persisting the volume; an unchanged volume keeps its previous
 * save flag unless save is TRUE. */
static pa_bool_t update_reference_volume(pa_source *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
    pa_cvolume volume;
    pa_bool_t reference_volume_changed;
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(v);
    pa_assert(channel_map);
    pa_assert(pa_cvolume_valid(v));

    volume = *v;
    pa_cvolume_remap(&volume, channel_map, &s->channel_map);

    reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
    s->reference_volume = volume;

    s->save_volume = (!reference_volume_changed && s->save_volume) || save;

    if (reference_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    else if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        /* If the root source's volume doesn't change, then there can't be any
         * changes in the other source in the source tree either.
         *
         * It's probably theoretically possible that even if the root source's
         * volume changes slightly, some filter source doesn't change its volume
         * due to rounding errors. If that happens, we still want to propagate
         * the changed root source volume to the sources connected to the
         * intermediate source that didn't change its volume. This theoretical
         * possibility is the reason why we have that !(s->flags &
         * PA_SOURCE_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
         * notice even if we returned here FALSE always if
         * reference_volume_changed is FALSE. */
        return FALSE;

    PA_IDXSET_FOREACH(o, s->outputs, idx) {
        if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
            update_reference_volume(o->destination_source, v, channel_map, FALSE);
    }

    return TRUE;
}
1514
/* Called from main thread */
/* Set the reference volume of a source. With volume == NULL (flat volume
 * mode only) the source volume is instead re-derived from its stream
 * volumes. The volume is always applied to the root of a volume-sharing tree
 * and propagated down. 'send_msg' controls whether the IO thread is informed
 * of the resulting soft/real volume; 'save' marks the volume for persistent
 * storage. As a special case, mono volumes are accepted for any channel map.
 * No-op (with a warning) on monitors of passthrough sinks, unless the call
 * resets the volume to 0 dB. */
void pa_source_set_volume(
        pa_source *s,
        const pa_cvolume *volume,
        pa_bool_t send_msg,
        pa_bool_t save) {

    pa_cvolume new_reference_volume;
    pa_source *root_source;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || pa_source_flat_volume_enabled(s));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* make sure we don't change the volume in PASSTHROUGH mode ...
     * ... *except* if we're being invoked to reset the volume to ensure 0 dB gain */
    if (pa_source_is_passthrough(s) && (!volume || !pa_cvolume_is_norm(volume))) {
        pa_log_warn("Cannot change volume, source is monitor of a PASSTHROUGH sink");
        return;
    }

    /* In case of volume sharing, the volume is set for the root source first,
     * from which it's then propagated to the sharing sources. */
    root_source = pa_source_get_master(s);

    if (PA_UNLIKELY(!root_source))
        return;

    /* As a special exception we accept mono volumes on all sources --
     * even on those with more complex channel maps */

    if (volume) {
        if (pa_cvolume_compatible(volume, &s->sample_spec))
            new_reference_volume = *volume;
        else {
            /* Mono volume: scale the current reference volume uniformly. */
            new_reference_volume = s->reference_volume;
            pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
        }

        pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);

        if (update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save)) {
            if (pa_source_flat_volume_enabled(root_source)) {
                /* OK, propagate this volume change back to the outputs */
                propagate_reference_volume(root_source);

                /* And now recalculate the real volume */
                compute_real_volume(root_source);
            } else
                update_real_volume(root_source, &root_source->reference_volume, &root_source->channel_map);
        }

    } else {
        /* If volume is NULL we synchronize the source's real and
         * reference volumes with the stream volumes. */

        pa_assert(pa_source_flat_volume_enabled(root_source));

        /* Ok, let's determine the new real volume */
        compute_real_volume(root_source);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_source->real_volume);
        /* If the source and it's root don't have the same number of channels, we need to remap */
        if (s != root_source && !pa_channel_map_equal(&s->channel_map, &root_source->channel_map))
            pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_source->channel_map);
        update_reference_volume(root_source, &new_reference_volume, &root_source->channel_map, save);

        /* Now that the reference volume is updated, we can update the streams'
         * reference ratios. */
        compute_reference_ratios(root_source);
    }

    if (root_source->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to root_source->soft_volume */

        pa_cvolume_reset(&root_source->soft_volume, root_source->sample_spec.channels);
        if (!(root_source->flags & PA_SOURCE_DEFERRED_VOLUME))
            root_source->set_volume(root_source);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the real volume */
        root_source->soft_volume = root_source->real_volume;

    /* This tells the source that soft volume and/or real volume changed */
    if (send_msg)
        pa_assert_se(pa_asyncmsgq_send(root_source->asyncmsgq, PA_MSGOBJECT(root_source), PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
}
1609
1610 /* Called from the io thread if sync volume is used, otherwise from the main thread.
1611 * Only to be called by source implementor */
1612 void pa_source_set_soft_volume(pa_source *s, const pa_cvolume *volume) {
1613
1614 pa_source_assert_ref(s);
1615 pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));
1616
1617 if (s->flags & PA_SOURCE_DEFERRED_VOLUME)
1618 pa_source_assert_io_context(s);
1619 else
1620 pa_assert_ctl_context();
1621
1622 if (!volume)
1623 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1624 else
1625 s->soft_volume = *volume;
1626
1627 if (PA_SOURCE_IS_LINKED(s->state) && !(s->flags & PA_SOURCE_DEFERRED_VOLUME))
1628 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1629 else
1630 s->thread_info.soft_volume = s->soft_volume;
1631 }
1632
/* Called from the main thread. Only called for the root source in volume sharing
 * cases, except for internal recursive calls. */
/* React to a hardware-initiated real volume change: adopt the real volume as
 * the new reference volume, rebuild stream volumes from their (unchanged)
 * real ratios, notify subscribers, and mark the volume for saving — changes
 * not triggered by PA are almost certainly user actions. */
static void propagate_real_volume(pa_source *s, const pa_cvolume *old_real_volume) {
    pa_source_output *o;
    uint32_t idx;

    pa_source_assert_ref(s);
    pa_assert(old_real_volume);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER)) {
        if (pa_cvolume_equal(old_real_volume, &s->real_volume))
            return;

        /* 1. Make the real volume the reference volume */
        update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
    }

    if (pa_source_flat_volume_enabled(s)) {

        PA_IDXSET_FOREACH(o, s->outputs, idx) {
            pa_cvolume old_volume = o->volume;

            /* 2. Since the source's reference and real volumes are equal
             * now our ratios should be too. */
            o->reference_ratio = o->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * o->volume = s->reference_volume * o->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            o->volume = s->reference_volume;
            pa_cvolume_remap(&o->volume, &s->channel_map, &o->channel_map);
            pa_sw_cvolume_multiply(&o->volume, &o->volume, &o->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &o->volume)) {

                if (o->volume_changed)
                    o->volume_changed(o);

                pa_subscription_post(o->core, PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE, o->index);
            }

            if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
                propagate_real_volume(o->destination_source, old_real_volume);
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    if (!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
        s->save_volume = TRUE;
}
1698
/* Called from io thread */
/* Ask the main thread to refresh this source's volume and mute state, e.g.
 * after the implementation noticed a hardware change from the IO thread.
 * The request is posted asynchronously to the main-thread message queue. */
void pa_source_update_volume_and_mute(pa_source *s) {
    pa_assert(s);
    pa_source_assert_io_context(s);

    pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
}
1706
/* Called from main thread */
/* Returns the source's reference volume. If refreshing is requested (or the
 * source asks for it via refresh_volume), the real volume is first re-read
 * from the hardware/IO thread and any externally caused change is propagated
 * to the reference volume and the attached streams. */
const pa_cvolume *pa_source_get_volume(pa_source *s, pa_bool_t force_refresh) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_real_volume;

        pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

        old_real_volume = s->real_volume;

        if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume)
            s->get_volume(s);

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        update_real_volume(s, &s->real_volume, &s->channel_map);
        propagate_real_volume(s, &old_real_volume);
    }

    return &s->reference_volume;
}
1731
/* Called from main thread. In volume sharing cases, only the root source may
 * call this. */
/* Entry point for source implementations to report an externally changed
 * hardware volume: stores it as the real volume and propagates the change to
 * the reference volume and all attached streams. */
void pa_source_volume_changed(pa_source *s, const pa_cvolume *new_real_volume) {
    pa_cvolume old_real_volume;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));
    pa_assert(!(s->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER));

    /* The source implementor may call this if the volume changed to make sure everyone is notified */

    old_real_volume = s->real_volume;
    update_real_volume(s, new_real_volume, &s->channel_map);
    propagate_real_volume(s, &old_real_volume);
}
1748
1749 /* Called from main thread */
1750 void pa_source_set_mute(pa_source *s, pa_bool_t mute, pa_bool_t save) {
1751 pa_bool_t old_muted;
1752
1753 pa_source_assert_ref(s);
1754 pa_assert_ctl_context();
1755 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1756
1757 old_muted = s->muted;
1758 s->muted = mute;
1759 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1760
1761 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->set_mute)
1762 s->set_mute(s);
1763
1764 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1765
1766 if (old_muted != s->muted)
1767 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1768 }
1769
1770 /* Called from main thread */
1771 pa_bool_t pa_source_get_mute(pa_source *s, pa_bool_t force_refresh) {
1772
1773 pa_source_assert_ref(s);
1774 pa_assert_ctl_context();
1775 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1776
1777 if (s->refresh_muted || force_refresh) {
1778 pa_bool_t old_muted = s->muted;
1779
1780 if (!(s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_mute)
1781 s->get_mute(s);
1782
1783 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1784
1785 if (old_muted != s->muted) {
1786 s->save_muted = TRUE;
1787
1788 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1789
1790 /* Make sure the soft mute status stays in sync */
1791 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1792 }
1793 }
1794
1795 return s->muted;
1796 }
1797
1798 /* Called from main thread */
1799 void pa_source_mute_changed(pa_source *s, pa_bool_t new_muted) {
1800 pa_source_assert_ref(s);
1801 pa_assert_ctl_context();
1802 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1803
1804 /* The source implementor may call this if the mute state changed to make sure everyone is notified */
1805
1806 if (s->muted == new_muted)
1807 return;
1808
1809 s->muted = new_muted;
1810 s->save_muted = TRUE;
1811
1812 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1813 }
1814
1815 /* Called from main thread */
1816 pa_bool_t pa_source_update_proplist(pa_source *s, pa_update_mode_t mode, pa_proplist *p) {
1817 pa_source_assert_ref(s);
1818 pa_assert_ctl_context();
1819
1820 if (p)
1821 pa_proplist_update(s->proplist, mode, p);
1822
1823 if (PA_SOURCE_IS_LINKED(s->state)) {
1824 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1825 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1826 }
1827
1828 return TRUE;
1829 }
1830
1831 /* Called from main thread */
1832 /* FIXME -- this should be dropped and be merged into pa_source_update_proplist() */
1833 void pa_source_set_description(pa_source *s, const char *description) {
1834 const char *old;
1835 pa_source_assert_ref(s);
1836 pa_assert_ctl_context();
1837
1838 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1839 return;
1840
1841 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1842
1843 if (old && description && pa_streq(old, description))
1844 return;
1845
1846 if (description)
1847 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1848 else
1849 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1850
1851 if (PA_SOURCE_IS_LINKED(s->state)) {
1852 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1853 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PROPLIST_CHANGED], s);
1854 }
1855 }
1856
/* Called from main thread */
/* Returns the total number of source outputs attached to this source,
 * including corked ones (contrast with pa_source_used_by(), which excludes
 * them). */
unsigned pa_source_linked_by(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    return pa_idxset_size(s->outputs);
}
1865
1866 /* Called from main thread */
1867 unsigned pa_source_used_by(pa_source *s) {
1868 unsigned ret;
1869
1870 pa_source_assert_ref(s);
1871 pa_assert_ctl_context();
1872 pa_assert(PA_SOURCE_IS_LINKED(s->state));
1873
1874 ret = pa_idxset_size(s->outputs);
1875 pa_assert(ret >= s->n_corked);
1876
1877 return ret - s->n_corked;
1878 }
1879
1880 /* Called from main thread */
1881 unsigned pa_source_check_suspend(pa_source *s) {
1882 unsigned ret;
1883 pa_source_output *o;
1884 uint32_t idx;
1885
1886 pa_source_assert_ref(s);
1887 pa_assert_ctl_context();
1888
1889 if (!PA_SOURCE_IS_LINKED(s->state))
1890 return 0;
1891
1892 ret = 0;
1893
1894 PA_IDXSET_FOREACH(o, s->outputs, idx) {
1895 pa_source_output_state_t st;
1896
1897 st = pa_source_output_get_state(o);
1898
1899 /* We do not assert here. It is perfectly valid for a source output to
1900 * be in the INIT state (i.e. created, marked done but not yet put)
1901 * and we should not care if it's unlinked as it won't contribute
1902 * towards our busy status.
1903 */
1904 if (!PA_SOURCE_OUTPUT_IS_LINKED(st))
1905 continue;
1906
1907 if (st == PA_SOURCE_OUTPUT_CORKED)
1908 continue;
1909
1910 if (o->flags & PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND)
1911 continue;
1912
1913 ret ++;
1914 }
1915
1916 return ret;
1917 }
1918
1919 /* Called from the IO thread */
1920 static void sync_output_volumes_within_thread(pa_source *s) {
1921 pa_source_output *o;
1922 void *state = NULL;
1923
1924 pa_source_assert_ref(s);
1925 pa_source_assert_io_context(s);
1926
1927 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
1928 if (pa_cvolume_equal(&o->thread_info.soft_volume, &o->soft_volume))
1929 continue;
1930
1931 o->thread_info.soft_volume = o->soft_volume;
1932 //pa_source_output_request_rewind(o, 0, TRUE, FALSE, FALSE);
1933 }
1934 }
1935
1936 /* Called from the IO thread. Only called for the root source in volume sharing
1937 * cases, except for internal recursive calls. */
1938 static void set_shared_volume_within_thread(pa_source *s) {
1939 pa_source_output *o;
1940 void *state = NULL;
1941
1942 pa_source_assert_ref(s);
1943
1944 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
1945
1946 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state) {
1947 if (o->destination_source && (o->destination_source->flags & PA_SOURCE_SHARE_VOLUME_WITH_MASTER))
1948 set_shared_volume_within_thread(o->destination_source);
1949 }
1950 }
1951
/* Called from IO thread, except when it is not */
/* Default message handler for a source: implements the generic
 * PA_SOURCE_MESSAGE_* protocol. Implementations normally chain up to this
 * from their own process_msg(). Returns 0 on success, -1 for unhandled
 * messages (and for GET_LATENCY, which implementors must override). */
int pa_source_process_msg(pa_msgobject *object, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_source *s = PA_SOURCE(object);
    pa_source_assert_ref(s);

    switch ((pa_source_message_t) code) {

        case PA_SOURCE_MESSAGE_ADD_OUTPUT: {
            pa_source_output *o = PA_SOURCE_OUTPUT(userdata);

            /* The hashmap holds its own reference on the output. */
            pa_hashmap_put(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index), pa_source_output_ref(o));

            if (o->direct_on_input) {
                o->thread_info.direct_on_input = o->direct_on_input;
                pa_hashmap_put(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index), o);
            }

            pa_assert(!o->thread_info.attached);
            o->thread_info.attached = TRUE;

            if (o->attach)
                o->attach(o);

            pa_source_output_set_state_within_thread(o, o->state);

            if (o->thread_info.requested_source_latency != (pa_usec_t) -1)
                pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);

            pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);

            /* We don't just invalidate the requested latency here,
             * because if we are in a move we might need to fix up the
             * requested latency. */
            pa_source_output_set_requested_latency_within_thread(o, o->thread_info.requested_source_latency);

            /* In flat volume mode we need to update the volume as
             * well */
            return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SOURCE_MESSAGE_REMOVE_OUTPUT: {
            pa_source_output *o = PA_SOURCE_OUTPUT(userdata);

            pa_source_output_set_state_within_thread(o, o->state);

            if (o->detach)
                o->detach(o);

            pa_assert(o->thread_info.attached);
            o->thread_info.attached = FALSE;

            if (o->thread_info.direct_on_input) {
                pa_hashmap_remove(o->thread_info.direct_on_input->thread_info.direct_outputs, PA_UINT32_TO_PTR(o->index));
                o->thread_info.direct_on_input = NULL;
            }

            /* Drop the reference the hashmap held (taken in ADD_OUTPUT). */
            if (pa_hashmap_remove(s->thread_info.outputs, PA_UINT32_TO_PTR(o->index)))
                pa_source_output_unref(o);

            pa_source_invalidate_requested_latency(s, TRUE);

            /* In flat volume mode we need to update the volume as
             * well */
            return object->process_msg(object, PA_SOURCE_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
        }

        case PA_SOURCE_MESSAGE_SET_SHARED_VOLUME: {
            pa_source *root_source = pa_source_get_master(s);

            if (PA_LIKELY(root_source))
                set_shared_volume_within_thread(root_source);

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:

            /* With deferred volume the HW write is scheduled instead of
             * being applied immediately. */
            if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
                s->set_volume(s);
                pa_source_volume_change_push(s);
            }
            /* Fall through ... */

        case PA_SOURCE_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
            }

            /* Fall through ... */

        case PA_SOURCE_MESSAGE_SYNC_VOLUMES:
            sync_output_volumes_within_thread(s);
            return 0;

        case PA_SOURCE_MESSAGE_GET_VOLUME:

            if ((s->flags & PA_SOURCE_DEFERRED_VOLUME) && s->get_volume) {
                s->get_volume(s);
                pa_source_volume_change_flush(s);
                pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
            }

            /* In case source implementor reset SW volume. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
            }

            return 0;

        case PA_SOURCE_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
            }

            if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->set_mute)
                s->set_mute(s);

            return 0;

        case PA_SOURCE_MESSAGE_GET_MUTE:

            if (s->flags & PA_SOURCE_DEFERRED_VOLUME && s->get_mute)
                s->get_mute(s);

            return 0;

        case PA_SOURCE_MESSAGE_SET_STATE: {

            /* We only need to poke the outputs when crossing the
             * suspended/opened boundary in either direction. */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SOURCE_SUSPENDED && PA_SOURCE_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SOURCE_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SOURCE_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            if (suspend_change) {
                pa_source_output *o;
                void *state = NULL;

                while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
                    if (o->suspend_within_thread)
                        o->suspend_within_thread(o, s->thread_info.state == PA_SOURCE_SUSPENDED);
            }

            return 0;
        }

        case PA_SOURCE_MESSAGE_DETACH:

            /* Detach all streams */
            pa_source_detach_within_thread(s);
            return 0;

        case PA_SOURCE_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_source_attach_within_thread(s);
            return 0;

        case PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_source_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SOURCE_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            pa_source_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SOURCE_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SOURCE_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SOURCE_MESSAGE_SET_FIXED_LATENCY:

            pa_source_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SOURCE_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SOURCE_MESSAGE_SET_MAX_REWIND:

            pa_source_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SOURCE_MESSAGE_GET_LATENCY:

            /* A monitor source's latency is reported as zero here; the
             * real latency lives with the monitored sink. */
            if (s->monitor_of) {
                *((pa_usec_t*) userdata) = 0;
                return 0;
            }

            /* Implementors need to overwrite this implementation! */
            return -1;

        case PA_SOURCE_MESSAGE_SET_PORT:

            pa_assert(userdata);
            if (s->set_port) {
                struct source_message_set_port *msg_data = userdata;
                msg_data->ret = s->set_port(s, msg_data->port);
            }
            return 0;

        case PA_SOURCE_MESSAGE_UPDATE_VOLUME_AND_MUTE:
            /* This message is sent from IO-thread and handled in main thread. */
            pa_assert_ctl_context();

            /* Make sure we're not messing with main thread when no longer linked */
            if (!PA_SOURCE_IS_LINKED(s->state))
                return 0;

            pa_source_get_volume(s, TRUE);
            pa_source_get_mute(s, TRUE);
            return 0;

        case PA_SOURCE_MESSAGE_SET_LATENCY_OFFSET:
            s->thread_info.latency_offset = offset;
            return 0;

        case PA_SOURCE_MESSAGE_MAX:
            ;
    }

    return -1;
}
2204
2205 /* Called from main thread */
2206 int pa_source_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2207 pa_source *source;
2208 uint32_t idx;
2209 int ret = 0;
2210
2211 pa_core_assert_ref(c);
2212 pa_assert_ctl_context();
2213 pa_assert(cause != 0);
2214
2215 for (source = PA_SOURCE(pa_idxset_first(c->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(c->sources, &idx))) {
2216 int r;
2217
2218 if (source->monitor_of)
2219 continue;
2220
2221 if ((r = pa_source_suspend(source, suspend, cause)) < 0)
2222 ret = r;
2223 }
2224
2225 return ret;
2226 }
2227
/* Called from main thread */
/* Synchronously asks the IO thread to run detach() on all of this
 * source's outputs. */
void pa_source_detach(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
2236
/* Called from main thread */
/* Synchronously asks the IO thread to run attach() on all of this
 * source's outputs, the counterpart to pa_source_detach(). */
void pa_source_attach(pa_source *s) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2245
2246 /* Called from IO thread */
2247 void pa_source_detach_within_thread(pa_source *s) {
2248 pa_source_output *o;
2249 void *state = NULL;
2250
2251 pa_source_assert_ref(s);
2252 pa_source_assert_io_context(s);
2253 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2254
2255 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2256 if (o->detach)
2257 o->detach(o);
2258 }
2259
2260 /* Called from IO thread */
2261 void pa_source_attach_within_thread(pa_source *s) {
2262 pa_source_output *o;
2263 void *state = NULL;
2264
2265 pa_source_assert_ref(s);
2266 pa_source_assert_io_context(s);
2267 pa_assert(PA_SOURCE_IS_LINKED(s->thread_info.state));
2268
2269 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2270 if (o->attach)
2271 o->attach(o);
2272 }
2273
/* Called from IO thread */
/* Returns the effective requested latency: the smallest latency any
 * attached output asked for, clamped into [min_latency, max_latency], or
 * (pa_usec_t) -1 when nobody requested anything. The result is cached
 * once the source is fully linked. */
pa_usec_t pa_source_get_requested_latency_within_thread(pa_source *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_source_output *o;
    void *state = NULL;

    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);

    /* Fixed-latency sources simply report their (clamped) fixed latency. */
    if (!(s->flags & PA_SOURCE_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    /* Reuse the cached value while it is valid. */
    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Find the smallest latency any of our outputs requested. */
    PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
        if (o->thread_info.requested_source_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > o->thread_info.requested_source_latency))
            result = o->thread_info.requested_source_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
        /* Only cache this if we are fully set up */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
2305
/* Called from main thread */
/* Main-thread view of the requested latency: 0 while suspended, otherwise
 * whatever the IO thread reports (max_latency when nothing was requested,
 * see the GET_REQUESTED_LATENCY handler). */
pa_usec_t pa_source_get_requested_latency(pa_source *s) {
    pa_usec_t usec = 0;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SOURCE_IS_LINKED(s->state));

    if (s->state == PA_SOURCE_SUSPENDED)
        return 0;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);

    return usec;
}
2321
2322 /* Called from IO thread */
2323 void pa_source_set_max_rewind_within_thread(pa_source *s, size_t max_rewind) {
2324 pa_source_output *o;
2325 void *state = NULL;
2326
2327 pa_source_assert_ref(s);
2328 pa_source_assert_io_context(s);
2329
2330 if (max_rewind == s->thread_info.max_rewind)
2331 return;
2332
2333 s->thread_info.max_rewind = max_rewind;
2334
2335 if (PA_SOURCE_IS_LINKED(s->thread_info.state))
2336 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2337 pa_source_output_update_max_rewind(o, s->thread_info.max_rewind);
2338 }
2339
/* Called from main thread */
/* Updates max_rewind: while linked the IO thread owns the value, so the
 * change is routed through a message; before linking it can be written
 * directly. */
void pa_source_set_max_rewind(pa_source *s, size_t max_rewind) {
    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    if (PA_SOURCE_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
    else
        pa_source_set_max_rewind_within_thread(s, max_rewind);
}
2350
2351 /* Called from IO thread */
2352 void pa_source_invalidate_requested_latency(pa_source *s, pa_bool_t dynamic) {
2353 pa_source_output *o;
2354 void *state = NULL;
2355
2356 pa_source_assert_ref(s);
2357 pa_source_assert_io_context(s);
2358
2359 if ((s->flags & PA_SOURCE_DYNAMIC_LATENCY))
2360 s->thread_info.requested_latency_valid = FALSE;
2361 else if (dynamic)
2362 return;
2363
2364 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2365
2366 if (s->update_requested_latency)
2367 s->update_requested_latency(s);
2368
2369 while ((o = pa_hashmap_iterate(s->thread_info.outputs, &state, NULL)))
2370 if (o->update_source_requested_latency)
2371 o->update_source_requested_latency(o);
2372 }
2373
2374 if (s->monitor_of)
2375 pa_sink_invalidate_requested_latency(s->monitor_of, dynamic);
2376 }
2377
2378 /* Called from main thread */
2379 void pa_source_set_latency_range(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2380 pa_source_assert_ref(s);
2381 pa_assert_ctl_context();
2382
2383 /* min_latency == 0: no limit
2384 * min_latency anything else: specified limit
2385 *
2386 * Similar for max_latency */
2387
2388 if (min_latency < ABSOLUTE_MIN_LATENCY)
2389 min_latency = ABSOLUTE_MIN_LATENCY;
2390
2391 if (max_latency <= 0 ||
2392 max_latency > ABSOLUTE_MAX_LATENCY)
2393 max_latency = ABSOLUTE_MAX_LATENCY;
2394
2395 pa_assert(min_latency <= max_latency);
2396
2397 /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
2398 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2399 max_latency == ABSOLUTE_MAX_LATENCY) ||
2400 (s->flags & PA_SOURCE_DYNAMIC_LATENCY));
2401
2402 if (PA_SOURCE_IS_LINKED(s->state)) {
2403 pa_usec_t r[2];
2404
2405 r[0] = min_latency;
2406 r[1] = max_latency;
2407
2408 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2409 } else
2410 pa_source_set_latency_range_within_thread(s, min_latency, max_latency);
2411 }
2412
2413 /* Called from main thread */
2414 void pa_source_get_latency_range(pa_source *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2415 pa_source_assert_ref(s);
2416 pa_assert_ctl_context();
2417 pa_assert(min_latency);
2418 pa_assert(max_latency);
2419
2420 if (PA_SOURCE_IS_LINKED(s->state)) {
2421 pa_usec_t r[2] = { 0, 0 };
2422
2423 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2424
2425 *min_latency = r[0];
2426 *max_latency = r[1];
2427 } else {
2428 *min_latency = s->thread_info.min_latency;
2429 *max_latency = s->thread_info.max_latency;
2430 }
2431 }
2432
/* Called from IO thread, and from main thread before pa_source_put() is called */
/* Installs a new latency range, notifies attached outputs and invalidates
 * the cached requested latency. Callers must pass values already clamped
 * into the absolute window. */
void pa_source_set_latency_range_within_thread(pa_source *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_source_assert_ref(s);
    pa_source_assert_io_context(s);

    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SOURCE_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SOURCE_DYNAMIC_LATENCY) ||
              s->monitor_of);

    /* No change, nothing to propagate. */
    if (s->thread_info.min_latency == min_latency &&
        s->thread_info.max_latency == max_latency)
        return;

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    /* Tell every attached output about the new range. */
    if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
        pa_source_output *o;
        void *state = NULL;

        PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
            if (o->update_source_latency_range)
                o->update_source_latency_range(o);
    }

    /* The cached requested latency may now be out of range. */
    pa_source_invalidate_requested_latency(s, FALSE);
}
2466
2467 /* Called from main thread, before the source is put */
2468 void pa_source_set_fixed_latency(pa_source *s, pa_usec_t latency) {
2469 pa_source_assert_ref(s);
2470 pa_assert_ctl_context();
2471
2472 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2473 pa_assert(latency == 0);
2474 return;
2475 }
2476
2477 if (latency < ABSOLUTE_MIN_LATENCY)
2478 latency = ABSOLUTE_MIN_LATENCY;
2479
2480 if (latency > ABSOLUTE_MAX_LATENCY)
2481 latency = ABSOLUTE_MAX_LATENCY;
2482
2483 if (PA_SOURCE_IS_LINKED(s->state))
2484 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2485 else
2486 s->thread_info.fixed_latency = latency;
2487 }
2488
2489 /* Called from main thread */
2490 pa_usec_t pa_source_get_fixed_latency(pa_source *s) {
2491 pa_usec_t latency;
2492
2493 pa_source_assert_ref(s);
2494 pa_assert_ctl_context();
2495
2496 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY)
2497 return 0;
2498
2499 if (PA_SOURCE_IS_LINKED(s->state))
2500 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2501 else
2502 latency = s->thread_info.fixed_latency;
2503
2504 return latency;
2505 }
2506
2507 /* Called from IO thread */
2508 void pa_source_set_fixed_latency_within_thread(pa_source *s, pa_usec_t latency) {
2509 pa_source_assert_ref(s);
2510 pa_source_assert_io_context(s);
2511
2512 if (s->flags & PA_SOURCE_DYNAMIC_LATENCY) {
2513 pa_assert(latency == 0);
2514 return;
2515 }
2516
2517 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2518 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2519
2520 if (s->thread_info.fixed_latency == latency)
2521 return;
2522
2523 s->thread_info.fixed_latency = latency;
2524
2525 if (PA_SOURCE_IS_LINKED(s->thread_info.state)) {
2526 pa_source_output *o;
2527 void *state = NULL;
2528
2529 PA_HASHMAP_FOREACH(o, s->thread_info.outputs, state)
2530 if (o->update_source_fixed_latency)
2531 o->update_source_fixed_latency(o);
2532 }
2533
2534 pa_source_invalidate_requested_latency(s, FALSE);
2535 }
2536
2537 /* Called from main thread */
2538 void pa_source_set_latency_offset(pa_source *s, int64_t offset) {
2539 pa_source_assert_ref(s);
2540
2541 s->latency_offset = offset;
2542
2543 if (PA_SOURCE_IS_LINKED(s->state))
2544 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_LATENCY_OFFSET, NULL, offset, NULL) == 0);
2545 else
2546 s->thread_info.latency_offset = offset;
2547 }
2548
2549 /* Called from main thread */
2550 size_t pa_source_get_max_rewind(pa_source *s) {
2551 size_t r;
2552 pa_assert_ctl_context();
2553 pa_source_assert_ref(s);
2554
2555 if (!PA_SOURCE_IS_LINKED(s->state))
2556 return s->thread_info.max_rewind;
2557
2558 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2559
2560 return r;
2561 }
2562
/* Called from main context */
/* Switches the source's active port to the port named `name`. Returns 0 on
 * success or a negative PA_ERR_* code (NOTIMPLEMENTED when the source has
 * no set_port(), NOENTITY for unknown names or driver failure). `save`
 * marks the choice for persistence. */
int pa_source_set_port(pa_source *s, const char *name, pa_bool_t save) {
    pa_device_port *port;
    int ret;

    pa_source_assert_ref(s);
    pa_assert_ctl_context();

    if (!s->set_port) {
        pa_log_debug("set_port() operation not implemented for source %u \"%s\"", s->index, s->name);
        return -PA_ERR_NOTIMPLEMENTED;
    }

    if (!name)
        return -PA_ERR_NOENTITY;

    if (!(port = pa_hashmap_get(s->ports, name)))
        return -PA_ERR_NOENTITY;

    /* Re-selecting the active port only updates the save flag. */
    if (s->active_port == port) {
        s->save_port = s->save_port || save;
        return 0;
    }

    /* With deferred volume the port switch must happen in the IO thread,
     * so route it through a synchronous message. */
    if (s->flags & PA_SOURCE_DEFERRED_VOLUME) {
        struct source_message_set_port msg = { .port = port, .ret = 0 };
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SOURCE_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
        ret = msg.ret;
    }
    else
        ret = s->set_port(s, port);

    if (ret < 0)
        return -PA_ERR_NOENTITY;

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

    pa_log_info("Changed port of source %u \"%s\" to %s", s->index, s->name, port->name);

    s->active_port = port;
    s->save_port = save;

    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SOURCE_PORT_CHANGED], s);

    return 0;
}
2609
/* Lock-free recycling pool for pa_source_volume_change entries, shared by
 * all sources, to avoid malloc/free churn in the IO thread. */
PA_STATIC_FLIST_DECLARE(pa_source_volume_change, 0, pa_xfree);
2611
2612 /* Called from the IO thread. */
2613 static pa_source_volume_change *pa_source_volume_change_new(pa_source *s) {
2614 pa_source_volume_change *c;
2615 if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_source_volume_change))))
2616 c = pa_xnew(pa_source_volume_change, 1);
2617
2618 PA_LLIST_INIT(pa_source_volume_change, c);
2619 c->at = 0;
2620 pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
2621 return c;
2622 }
2623
2624 /* Called from the IO thread. */
2625 static void pa_source_volume_change_free(pa_source_volume_change *c) {
2626 pa_assert(c);
2627 if (pa_flist_push(PA_STATIC_FLIST_GET(pa_source_volume_change), c) < 0)
2628 pa_xfree(c);
2629 }
2630
/* Called from the IO thread. */
/* Schedules a deferred HW volume write so that it takes effect when the
 * audio currently in flight has passed through the source's latency. The
 * entry is inserted into the time-ordered volume_changes list; writes
 * going up are delayed, writes going down are brought forward by the
 * configured safety margin. */
void pa_source_volume_change_push(pa_source *s) {
    pa_source_volume_change *c = NULL;
    pa_source_volume_change *nc = NULL;
    uint32_t safety_margin = s->thread_info.volume_change_safety_margin;

    const char *direction = NULL;

    pa_assert(s);
    nc = pa_source_volume_change_new(s);

    /* NOTE: There is already more different volumes in pa_source that I can remember.
     * Adding one more volume for HW would get us rid of this, but I am trying
     * to survive with the ones we already have. */
    pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);

    /* Nothing pending and the HW volume already matches: nothing to do. */
    if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
        pa_log_debug("Volume not changing");
        pa_source_volume_change_free(nc);
        return;
    }

    /* Target time: now plus the source latency plus the configured extra delay. */
    nc->at = pa_source_get_latency_within_thread(s);
    nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    /* Walk backwards from the tail to find the entry to order against. */
    if (s->thread_info.volume_changes_tail) {
        for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
            /* If volume is going up let's do it a bit late. If it is going
             * down let's do it a bit early. */
            if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
                if (nc->at + safety_margin > c->at) {
                    nc->at += safety_margin;
                    direction = "up";
                    break;
                }
            }
            else if (nc->at - safety_margin > c->at) {
                nc->at -= safety_margin;
                direction = "down";
                break;
            }
        }
    }

    if (c == NULL) {
        /* No pending entry to order against: compare with the currently
         * applied HW volume instead and prepend. */
        if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
            nc->at += safety_margin;
            direction = "up";
        } else {
            nc->at -= safety_margin;
            direction = "down";
        }
        PA_LLIST_PREPEND(pa_source_volume_change, s->thread_info.volume_changes, nc);
    }
    else {
        PA_LLIST_INSERT_AFTER(pa_source_volume_change, s->thread_info.volume_changes, c, nc);
    }

    pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);

    /* We can ignore volume events that came earlier but should happen later than this. */
    PA_LLIST_FOREACH(c, nc->next) {
        pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
        pa_source_volume_change_free(c);
    }
    nc->next = NULL;
    s->thread_info.volume_changes_tail = nc;
}
2699
2700 /* Called from the IO thread. */
2701 static void pa_source_volume_change_flush(pa_source *s) {
2702 pa_source_volume_change *c = s->thread_info.volume_changes;
2703 pa_assert(s);
2704 s->thread_info.volume_changes = NULL;
2705 s->thread_info.volume_changes_tail = NULL;
2706 while (c) {
2707 pa_source_volume_change *next = c->next;
2708 pa_source_volume_change_free(c);
2709 c = next;
2710 }
2711 }
2712
/* Called from the IO thread. */
/* Applies every queued volume change whose deadline has passed, writing the
 * last one to the hardware via s->write_volume(). Returns TRUE if anything
 * was written. If usec_to_next is non-NULL it receives the delay until the
 * next pending change, or 0 when the queue is empty. */
pa_bool_t pa_source_volume_change_apply(pa_source *s, pa_usec_t *usec_to_next) {
    pa_usec_t now;
    pa_bool_t ret = FALSE;

    pa_assert(s);

    if (!s->thread_info.volume_changes || !PA_SOURCE_IS_LINKED(s->state)) {
        if (usec_to_next)
            *usec_to_next = 0;
        return ret;
    }

    /* Deferred volume changes only make sense with a write_volume() hook. */
    pa_assert(s->write_volume);

    now = pa_rtclock_now();

    /* Pop and apply every change that is already due. */
    while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
        pa_source_volume_change *c = s->thread_info.volume_changes;
        PA_LLIST_REMOVE(pa_source_volume_change, s->thread_info.volume_changes, c);
        pa_log_debug("Volume change to %d at %llu was written %llu usec late",
                     pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
        ret = TRUE;
        s->thread_info.current_hw_volume = c->hw_volume;
        pa_source_volume_change_free(c);
    }

    if (ret)
        s->write_volume(s);

    if (s->thread_info.volume_changes) {
        if (usec_to_next)
            *usec_to_next = s->thread_info.volume_changes->at - now;
        if (pa_log_ratelimit(PA_LOG_DEBUG))
            pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
    }
    else {
        if (usec_to_next)
            *usec_to_next = 0;
        s->thread_info.volume_changes_tail = NULL;
    }
    return ret;
}
2756
2757
2758 /* Called from the main thread */
2759 /* Gets the list of formats supported by the source. The members and idxset must
2760 * be freed by the caller. */
2761 pa_idxset* pa_source_get_formats(pa_source *s) {
2762 pa_idxset *ret;
2763
2764 pa_assert(s);
2765
2766 if (s->get_formats) {
2767 /* Source supports format query, all is good */
2768 ret = s->get_formats(s);
2769 } else {
2770 /* Source doesn't support format query, so assume it does PCM */
2771 pa_format_info *f = pa_format_info_new();
2772 f->encoding = PA_ENCODING_PCM;
2773
2774 ret = pa_idxset_new(NULL, NULL);
2775 pa_idxset_put(ret, f, NULL);
2776 }
2777
2778 return ret;
2779 }
2780
2781 /* Called from the main thread */
2782 /* Checks if the source can accept this format */
2783 pa_bool_t pa_source_check_format(pa_source *s, pa_format_info *f)
2784 {
2785 pa_idxset *formats = NULL;
2786 pa_bool_t ret = FALSE;
2787
2788 pa_assert(s);
2789 pa_assert(f);
2790
2791 formats = pa_source_get_formats(s);
2792
2793 if (formats) {
2794 pa_format_info *finfo_device;
2795 uint32_t i;
2796
2797 PA_IDXSET_FOREACH(finfo_device, formats, i) {
2798 if (pa_format_info_is_compatible(finfo_device, f)) {
2799 ret = TRUE;
2800 break;
2801 }
2802 }
2803
2804 pa_idxset_free(formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2805 }
2806
2807 return ret;
2808 }
2809
2810 /* Called from the main thread */
2811 /* Calculates the intersection between formats supported by the source and
2812 * in_formats, and returns these, in the order of the source's formats. */
2813 pa_idxset* pa_source_check_formats(pa_source *s, pa_idxset *in_formats) {
2814 pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *source_formats = NULL;
2815 pa_format_info *f_source, *f_in;
2816 uint32_t i, j;
2817
2818 pa_assert(s);
2819
2820 if (!in_formats || pa_idxset_isempty(in_formats))
2821 goto done;
2822
2823 source_formats = pa_source_get_formats(s);
2824
2825 PA_IDXSET_FOREACH(f_source, source_formats, i) {
2826 PA_IDXSET_FOREACH(f_in, in_formats, j) {
2827 if (pa_format_info_is_compatible(f_source, f_in))
2828 pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
2829 }
2830 }
2831
2832 done:
2833 if (source_formats)
2834 pa_idxset_free(source_formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
2835
2836 return out_formats;
2837 }