1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
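/* MAX_MIX_CHANNELS limits how many sink inputs are mixed in one pass;
 * MIX_BUFFER_LENGTH is the default render size when no explicit length is
 * given. The ABSOLUTE_*_LATENCY values are hard bounds (in microseconds)
 * for the configurable latency range: 500 usec up to 10 s. */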
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53
54 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
55
56 static void sink_free(pa_object *s);
57
58 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
59 pa_assert(data);
60
61 memset(data, 0, sizeof(*data));
62 data->proplist = pa_proplist_new();
63
64 return data;
65 }
66
67 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
68 pa_assert(data);
69
70 pa_xfree(data->name);
71 data->name = pa_xstrdup(name);
72 }
73
74 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
75 pa_assert(data);
76
77 if ((data->sample_spec_is_set = !!spec))
78 data->sample_spec = *spec;
79 }
80
81 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
82 pa_assert(data);
83
84 if ((data->channel_map_is_set = !!map))
85 data->channel_map = *map;
86 }
87
88 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
89 pa_assert(data);
90
91 if ((data->volume_is_set = !!volume))
92 data->volume = *volume;
93 }
94
95 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
96 pa_assert(data);
97
98 data->muted_is_set = TRUE;
99 data->muted = !!mute;
100 }
101
102 void pa_sink_new_data_done(pa_sink_new_data *data) {
103 pa_assert(data);
104
105 pa_xfree(data->name);
106 pa_proplist_free(data->proplist);
107 }
108
109 /* Called from main context */
110 static void reset_callbacks(pa_sink *s) {
111 pa_assert(s);
112
113 s->set_state = NULL;
114 s->get_volume = NULL;
115 s->set_volume = NULL;
116 s->get_mute = NULL;
117 s->set_mute = NULL;
118 s->request_rewind = NULL;
119 s->update_requested_latency = NULL;
120 }
121
122 /* Called from main context */
123 pa_sink* pa_sink_new(
124 pa_core *core,
125 pa_sink_new_data *data,
126 pa_sink_flags_t flags) {
127
128 pa_sink *s;
129 const char *name;
130 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
131 pa_source_new_data source_data;
132 const char *dn;
133 char *pt;
134
135 pa_assert(core);
136 pa_assert(data);
137 pa_assert(data->name);
138
139 s = pa_msgobject_new(pa_sink);
140
141 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
142 pa_xfree(s);
143 return NULL;
144 }
145
146 pa_sink_new_data_set_name(data, name);
147
148 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
149 pa_xfree(s);
150 pa_namereg_unregister(core, name);
151 return NULL;
152 }
153
154 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
155 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
156
157 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
158
159 if (!data->channel_map_is_set)
160 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
161
162 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
163 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
164
165 if (!data->volume_is_set)
166 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
167
168 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
169 pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);
170
171 if (!data->muted_is_set)
172 data->muted = FALSE;
173
174 if (data->card)
175 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
176
177 pa_device_init_description(data->proplist);
178 pa_device_init_icon(data->proplist, TRUE);
179
180 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
181 pa_xfree(s);
182 pa_namereg_unregister(core, name);
183 return NULL;
184 }
185
186 s->parent.parent.free = sink_free;
187 s->parent.process_msg = pa_sink_process_msg;
188
189 s->core = core;
190 s->state = PA_SINK_INIT;
191 s->flags = flags;
192 s->name = pa_xstrdup(name);
193 s->proplist = pa_proplist_copy(data->proplist);
194 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
195 s->module = data->module;
196 s->card = data->card;
197
198 s->sample_spec = data->sample_spec;
199 s->channel_map = data->channel_map;
200
201 s->inputs = pa_idxset_new(NULL, NULL);
202 s->n_corked = 0;
203
204 s->virtual_volume = data->volume;
205 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
206 s->base_volume = PA_VOLUME_NORM;
207 s->n_volume_steps = PA_VOLUME_NORM+1;
208 s->muted = data->muted;
209 s->refresh_volume = s->refresh_muted = FALSE;
210
211 reset_callbacks(s);
212 s->userdata = NULL;
213
214 s->asyncmsgq = NULL;
215 s->rtpoll = NULL;
216
217 pa_silence_memchunk_get(
218 &core->silence_cache,
219 core->mempool,
220 &s->silence,
221 &s->sample_spec,
222 0);
223
224 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
225 s->thread_info.soft_volume = s->soft_volume;
226 s->thread_info.soft_muted = s->muted;
227 s->thread_info.state = s->state;
228 s->thread_info.rewind_nbytes = 0;
229 s->thread_info.rewind_requested = FALSE;
230 s->thread_info.max_rewind = 0;
231 s->thread_info.max_request = 0;
232 s->thread_info.requested_latency_valid = FALSE;
233 s->thread_info.requested_latency = 0;
234 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
235 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
236
237 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
238
239 if (s->card)
240 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
241
242 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
243 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
244 s->index,
245 s->name,
246 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
247 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
248 pt);
249 pa_xfree(pt);
250
251 pa_source_new_data_init(&source_data);
252 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
253 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
254 source_data.name = pa_sprintf_malloc("%s.monitor", name);
255 source_data.driver = data->driver;
256 source_data.module = data->module;
257 source_data.card = data->card;
258
259 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
260 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
261 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
262
263 s->monitor_source = pa_source_new(core, &source_data, 0);
264
265 pa_source_new_data_done(&source_data);
266
267 if (!s->monitor_source) {
268 pa_sink_unlink(s);
269 pa_sink_unref(s);
270 return NULL;
271 }
272
273 s->monitor_source->monitor_of = s;
274
275 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
276 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
277
278 return s;
279 }
280
281 /* Called from main context */
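/* Changes the sink state: asks the implementor via set_state(), forwards
 * PA_SINK_MESSAGE_SET_STATE to the IO thread (rolling back on failure),
 * fires the state-changed hook and subscription event, and on suspend or
 * resume notifies or kills the connected sink inputs as appropriate. */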
282 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
283 int ret;
284 pa_bool_t suspend_change;
285 pa_sink_state_t original_state;
286
287 pa_assert(s);
288
289 if (s->state == state)
290 return 0;
291
292 original_state = s->state;
293
294 suspend_change =
295 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
296 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
297
298 if (s->set_state)
299 if ((ret = s->set_state(s, state)) < 0)
300 return ret;
301
302 if (s->asyncmsgq)
303 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
304
305 if (s->set_state)
306 s->set_state(s, original_state);
307
308 return ret;
309 }
310
311 s->state = state;
312
313     if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
314 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
315 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
316 }
317
318 if (suspend_change) {
319 pa_sink_input *i;
320 uint32_t idx;
321
322 /* We're suspending or resuming, tell everyone about it */
323
324 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx)))
325 if (s->state == PA_SINK_SUSPENDED &&
326 (i->flags & PA_SINK_INPUT_FAIL_ON_SUSPEND))
327 pa_sink_input_kill(i);
328 else if (i->suspend)
329 i->suspend(i, state == PA_SINK_SUSPENDED);
330
331 if (s->monitor_source)
332 pa_source_sync_suspend(s->monitor_source);
333 }
334
335 return 0;
336 }
337
338 /* Called from main context */
339 void pa_sink_put(pa_sink* s) {
340 pa_sink_assert_ref(s);
341
342 pa_assert(s->state == PA_SINK_INIT);
343
344 /* The following fields must be initialized properly when calling _put() */
345 pa_assert(s->asyncmsgq);
346 pa_assert(s->rtpoll);
347 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
348
349 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL)) {
350 s->flags |= PA_SINK_DECIBEL_VOLUME;
351
352 s->thread_info.soft_volume = s->soft_volume;
353 s->thread_info.soft_muted = s->muted;
354 }
355
356 if (s->flags & PA_SINK_DECIBEL_VOLUME)
357 s->n_volume_steps = PA_VOLUME_NORM+1;
358
359 if (s->core->flat_volumes)
360 if (s->flags & PA_SINK_DECIBEL_VOLUME)
361 s->flags |= PA_SINK_FLAT_VOLUME;
362
363 if (s->flags & PA_SINK_LATENCY)
364 s->monitor_source->flags |= PA_SOURCE_LATENCY;
365
366 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
367 s->monitor_source->flags |= PA_SOURCE_DYNAMIC_LATENCY;
368
369 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
370
371 pa_source_put(s->monitor_source);
372
373 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
374 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
375 }
376
377 /* Called from main context */
378 void pa_sink_unlink(pa_sink* s) {
379 pa_bool_t linked;
380 pa_sink_input *i, *j = NULL;
381
382 pa_assert(s);
383
384 /* Please note that pa_sink_unlink() does more than simply
385 * reversing pa_sink_put(). It also undoes the registrations
386 * already done in pa_sink_new()! */
387
388 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
389 * may be called multiple times on the same sink without bad
390 * effects. */
391
392 linked = PA_SINK_IS_LINKED(s->state);
393
394 if (linked)
395 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
396
397 if (s->state != PA_SINK_UNLINKED)
398 pa_namereg_unregister(s->core, s->name);
399 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
400
401 if (s->card)
402 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
403
404 while ((i = pa_idxset_first(s->inputs, NULL))) {
405 pa_assert(i != j);
406 pa_sink_input_kill(i);
407 j = i;
408 }
409
410 if (linked)
411 sink_set_state(s, PA_SINK_UNLINKED);
412 else
413 s->state = PA_SINK_UNLINKED;
414
415 reset_callbacks(s);
416
417 if (s->monitor_source)
418 pa_source_unlink(s->monitor_source);
419
420 if (linked) {
421 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
422 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
423 }
424 }
425
426 /* Called from main context */
427 static void sink_free(pa_object *o) {
428 pa_sink *s = PA_SINK(o);
429 pa_sink_input *i;
430
431 pa_assert(s);
432 pa_assert(pa_sink_refcnt(s) == 0);
433
434 if (PA_SINK_IS_LINKED(s->state))
435 pa_sink_unlink(s);
436
437 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
438
439 if (s->monitor_source) {
440 pa_source_unref(s->monitor_source);
441 s->monitor_source = NULL;
442 }
443
444 pa_idxset_free(s->inputs, NULL, NULL);
445
446 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
447 pa_sink_input_unref(i);
448
449 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
450
451 if (s->silence.memblock)
452 pa_memblock_unref(s->silence.memblock);
453
454 pa_xfree(s->name);
455 pa_xfree(s->driver);
456
457 if (s->proplist)
458 pa_proplist_free(s->proplist);
459
460 pa_xfree(s);
461 }
462
463 /* Called from main context */
464 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
465 pa_sink_assert_ref(s);
466
467 s->asyncmsgq = q;
468
469 if (s->monitor_source)
470 pa_source_set_asyncmsgq(s->monitor_source, q);
471 }
472
473 /* Called from main context */
474 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
475 pa_sink_assert_ref(s);
476
477 s->rtpoll = p;
478 if (s->monitor_source)
479 pa_source_set_rtpoll(s->monitor_source, p);
480 }
481
482 /* Called from main context */
483 int pa_sink_update_status(pa_sink*s) {
484 pa_sink_assert_ref(s);
485 pa_assert(PA_SINK_IS_LINKED(s->state));
486
487 if (s->state == PA_SINK_SUSPENDED)
488 return 0;
489
490 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
491 }
492
493 /* Called from main context */
494 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend) {
495 pa_sink_assert_ref(s);
496 pa_assert(PA_SINK_IS_LINKED(s->state));
497
498 if (suspend)
499 return sink_set_state(s, PA_SINK_SUSPENDED);
500 else
501 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
502 }
503
504 /* Called from main context */
505 pa_queue *pa_sink_move_all_start(pa_sink *s) {
506 pa_queue *q;
507 pa_sink_input *i, *n;
508 uint32_t idx;
509
510 pa_sink_assert_ref(s);
511 pa_assert(PA_SINK_IS_LINKED(s->state));
512
513 q = pa_queue_new();
514
515 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
516 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
517
518 if (pa_sink_input_start_move(i) >= 0)
519 pa_queue_push(q, pa_sink_input_ref(i));
520 }
521
522 return q;
523 }
524
525 /* Called from main context */
526 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
527 pa_sink_input *i;
528
529 pa_sink_assert_ref(s);
530 pa_assert(PA_SINK_IS_LINKED(s->state));
531 pa_assert(q);
532
533 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
534 if (pa_sink_input_finish_move(i, s, save) < 0)
535 pa_sink_input_kill(i);
536
537 pa_sink_input_unref(i);
538 }
539
540 pa_queue_free(q, NULL, NULL);
541 }
542
543 /* Called from main context */
544 void pa_sink_move_all_fail(pa_queue *q) {
545 pa_sink_input *i;
546 pa_assert(q);
547
548 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
549 if (pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_MOVE_FAIL], i) == PA_HOOK_OK) {
550 pa_sink_input_kill(i);
551 pa_sink_input_unref(i);
552 }
553 }
554
555 pa_queue_free(q, NULL, NULL);
556 }
557
558 /* Called from IO thread context */
559 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
560 pa_sink_input *i;
561 void *state = NULL;
562 pa_sink_assert_ref(s);
563 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
564
565     /* If nobody requested this and this is not actually a real rewind
566      * then we can short-cut it */
567 if (!s->thread_info.rewind_requested && nbytes <= 0)
568 return;
569
570 s->thread_info.rewind_nbytes = 0;
571 s->thread_info.rewind_requested = FALSE;
572
573 if (s->thread_info.state == PA_SINK_SUSPENDED)
574 return;
575
576 if (nbytes > 0)
577 pa_log_debug("Processing rewind...");
578
579 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
580 pa_sink_input_assert_ref(i);
581 pa_sink_input_process_rewind(i, nbytes);
582 }
583
584 if (nbytes > 0)
585 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
586 pa_source_process_rewind(s->monitor_source, nbytes);
587 }
588
589 /* Called from IO thread context */
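/* Peeks one chunk from each connected sink input (up to maxinfo entries),
 * shrinking *length to the shortest chunk seen; pure silence chunks are
 * skipped. Returns the number of info[] entries filled in, each holding a
 * reference to its sink input. */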
590 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
591 pa_sink_input *i;
592 unsigned n = 0;
593 void *state = NULL;
594 size_t mixlength = *length;
595
596 pa_sink_assert_ref(s);
597 pa_assert(info);
598
599 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
600 pa_sink_input_assert_ref(i);
601
602 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
603
604 if (mixlength == 0 || info->chunk.length < mixlength)
605 mixlength = info->chunk.length;
606
607 if (pa_memblock_is_silence(info->chunk.memblock)) {
608 pa_memblock_unref(info->chunk.memblock);
609 continue;
610 }
611
612 info->userdata = pa_sink_input_ref(i);
613
614 pa_assert(info->chunk.memblock);
615 pa_assert(info->chunk.length > 0);
616
617 info++;
618 n++;
619 maxinfo--;
620 }
621
622 if (mixlength > 0)
623 *length = mixlength;
624
625 return n;
626 }
627
628 /* Called from IO thread context */
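/* Companion to fill_mix_info(): advances every sink input by result->length
 * bytes, feeds per-input data (or silence) to any direct outputs on the
 * monitor source, releases the references stored in the info[] array and
 * finally posts the mixed result to the monitor source. */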
629 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
630 pa_sink_input *i;
631 void *state = NULL;
632 unsigned p = 0;
633 unsigned n_unreffed = 0;
634
635 pa_sink_assert_ref(s);
636 pa_assert(result);
637 pa_assert(result->memblock);
638 pa_assert(result->length > 0);
639
640 /* We optimize for the case where the order of the inputs has not changed */
641
642 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
643 unsigned j;
644 pa_mix_info* m = NULL;
645
646 pa_sink_input_assert_ref(i);
647
648         /* Let's try to find the matching entry in the pa_mix_info array */
649 for (j = 0; j < n; j ++) {
650
651 if (info[p].userdata == i) {
652 m = info + p;
653 break;
654 }
655
656 p++;
657 if (p >= n)
658 p = 0;
659 }
660
661 /* Drop read data */
662 pa_sink_input_drop(i, result->length);
663
664 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
665
666 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
667 void *ostate = NULL;
668 pa_source_output *o;
669 pa_memchunk c;
670
671 if (m && m->chunk.memblock) {
672 c = m->chunk;
673 pa_memblock_ref(c.memblock);
674 pa_assert(result->length <= c.length);
675 c.length = result->length;
676
677 pa_memchunk_make_writable(&c, 0);
678 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
679 } else {
680 c = s->silence;
681 pa_memblock_ref(c.memblock);
682 pa_assert(result->length <= c.length);
683 c.length = result->length;
684 }
685
686 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
687 pa_source_output_assert_ref(o);
688 pa_assert(o->direct_on_input == i);
689 pa_source_post_direct(s->monitor_source, o, &c);
690 }
691
692 pa_memblock_unref(c.memblock);
693 }
694 }
695
696 if (m) {
697 if (m->chunk.memblock)
698 pa_memblock_unref(m->chunk.memblock);
699 pa_memchunk_reset(&m->chunk);
700
701 pa_sink_input_unref(m->userdata);
702 m->userdata = NULL;
703
704 n_unreffed += 1;
705 }
706 }
707
708 /* Now drop references to entries that are included in the
709 * pa_mix_info array but don't exist anymore */
710
711 if (n_unreffed < n) {
712 for (; n > 0; info++, n--) {
713 if (info->userdata)
714 pa_sink_input_unref(info->userdata);
715 if (info->chunk.memblock)
716 pa_memblock_unref(info->chunk.memblock);
717 }
718 }
719
720 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
721 pa_source_post(s->monitor_source, result);
722 }
723
724 /* Called from IO thread context */
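/* Renders up to "length" bytes of mixed audio from all inputs into *result.
 * With no input the sink's cached silence block is returned, with exactly
 * one input its chunk is reused (applying soft volume/mute only if needed),
 * otherwise the inputs are mixed into a freshly allocated memblock. */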
725 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
726 pa_mix_info info[MAX_MIX_CHANNELS];
727 unsigned n;
728 size_t block_size_max;
729
730 pa_sink_assert_ref(s);
731 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
732 pa_assert(pa_frame_aligned(length, &s->sample_spec));
733 pa_assert(result);
734
735 pa_sink_ref(s);
736
737 pa_assert(!s->thread_info.rewind_requested);
738 pa_assert(s->thread_info.rewind_nbytes == 0);
739
740 if (s->thread_info.state == PA_SINK_SUSPENDED) {
741 result->memblock = pa_memblock_ref(s->silence.memblock);
742 result->index = s->silence.index;
743 result->length = PA_MIN(s->silence.length, length);
pa_sink_unref(s); /* balance the pa_sink_ref() above on this early return */
744         return;
745 }
746
747 if (length <= 0)
748 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
749
750 block_size_max = pa_mempool_block_size_max(s->core->mempool);
751 if (length > block_size_max)
752 length = pa_frame_align(block_size_max, &s->sample_spec);
753
754 pa_assert(length > 0);
755
756 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
757
758 if (n == 0) {
759
760 *result = s->silence;
761 pa_memblock_ref(result->memblock);
762
763 if (result->length > length)
764 result->length = length;
765
766 } else if (n == 1) {
767 pa_cvolume volume;
768
769 *result = info[0].chunk;
770 pa_memblock_ref(result->memblock);
771
772 if (result->length > length)
773 result->length = length;
774
775 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
776
777 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
778 pa_memchunk_make_writable(result, 0);
779 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
780 pa_silence_memchunk(result, &s->sample_spec);
781 else
782 pa_volume_memchunk(result, &s->sample_spec, &volume);
783 }
784 } else {
785 void *ptr;
786 result->memblock = pa_memblock_new(s->core->mempool, length);
787
788 ptr = pa_memblock_acquire(result->memblock);
789 result->length = pa_mix(info, n,
790 ptr, length,
791 &s->sample_spec,
792 &s->thread_info.soft_volume,
793 s->thread_info.soft_muted);
794 pa_memblock_release(result->memblock);
795
796 result->index = 0;
797 }
798
799 inputs_drop(s, info, n, result);
800
801 pa_sink_unref(s);
802 }
803
804 /* Called from IO thread context */
805 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
806 pa_mix_info info[MAX_MIX_CHANNELS];
807 unsigned n;
808 size_t length, block_size_max;
809
810 pa_sink_assert_ref(s);
811 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
812 pa_assert(target);
813 pa_assert(target->memblock);
814 pa_assert(target->length > 0);
815 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
816
817 pa_sink_ref(s);
818
819 pa_assert(!s->thread_info.rewind_requested);
820 pa_assert(s->thread_info.rewind_nbytes == 0);
821
822 if (s->thread_info.state == PA_SINK_SUSPENDED) {
823 pa_silence_memchunk(target, &s->sample_spec);
pa_sink_unref(s); /* balance the pa_sink_ref() above on this early return */
824         return;
825 }
826
827 length = target->length;
828 block_size_max = pa_mempool_block_size_max(s->core->mempool);
829 if (length > block_size_max)
830 length = pa_frame_align(block_size_max, &s->sample_spec);
831
832 pa_assert(length > 0);
833
834 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
835
836 if (n == 0) {
837 if (target->length > length)
838 target->length = length;
839
840 pa_silence_memchunk(target, &s->sample_spec);
841 } else if (n == 1) {
842 pa_cvolume volume;
843
844 if (target->length > length)
845 target->length = length;
846
847 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
848
849 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
850 pa_silence_memchunk(target, &s->sample_spec);
851 else {
852 pa_memchunk vchunk;
853
854 vchunk = info[0].chunk;
855 pa_memblock_ref(vchunk.memblock);
856
857 if (vchunk.length > length)
858 vchunk.length = length;
859
860 if (!pa_cvolume_is_norm(&volume)) {
861 pa_memchunk_make_writable(&vchunk, 0);
862 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
863 }
864
865 pa_memchunk_memcpy(target, &vchunk);
866 pa_memblock_unref(vchunk.memblock);
867 }
868
869 } else {
870 void *ptr;
871
872 ptr = pa_memblock_acquire(target->memblock);
873
874 target->length = pa_mix(info, n,
875 (uint8_t*) ptr + target->index, length,
876 &s->sample_spec,
877 &s->thread_info.soft_volume,
878 s->thread_info.soft_muted);
879
880 pa_memblock_release(target->memblock);
881 }
882
883 inputs_drop(s, info, n, target);
884
885 pa_sink_unref(s);
886 }
887
888 /* Called from IO thread context */
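/* Like pa_sink_render_into(), but loops until the whole target chunk has
 * been filled, even if a single render pass produces less data. */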
889 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
890 pa_memchunk chunk;
891 size_t l, d;
892
893 pa_sink_assert_ref(s);
894 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
895 pa_assert(target);
896 pa_assert(target->memblock);
897 pa_assert(target->length > 0);
898 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
899
900 pa_sink_ref(s);
901
902 pa_assert(!s->thread_info.rewind_requested);
903 pa_assert(s->thread_info.rewind_nbytes == 0);
904
905 l = target->length;
906 d = 0;
907 while (l > 0) {
908 chunk = *target;
909 chunk.index += d;
910 chunk.length -= d;
911
912 pa_sink_render_into(s, &chunk);
913
914 d += chunk.length;
915 l -= chunk.length;
916 }
917
918 pa_sink_unref(s);
919 }
920
921 /* Called from IO thread context */
922 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
923 pa_sink_assert_ref(s);
924 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
925 pa_assert(length > 0);
926 pa_assert(pa_frame_aligned(length, &s->sample_spec));
927 pa_assert(result);
928
929 pa_assert(!s->thread_info.rewind_requested);
930 pa_assert(s->thread_info.rewind_nbytes == 0);
931
932 /*** This needs optimization ***/
933
934 result->index = 0;
935 result->length = length;
936 result->memblock = pa_memblock_new(s->core->mempool, length);
937
938 pa_sink_render_into_full(s, result);
939 }
940
941 /* Called from main thread */
942 pa_usec_t pa_sink_get_latency(pa_sink *s) {
943 pa_usec_t usec = 0;
944
945 pa_sink_assert_ref(s);
946 pa_assert(PA_SINK_IS_LINKED(s->state));
947
948 /* The returned value is supposed to be in the time domain of the sound card! */
949
950 if (s->state == PA_SINK_SUSPENDED)
951 return 0;
952
953 if (!(s->flags & PA_SINK_LATENCY))
954 return 0;
955
956 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
957
958 return usec;
959 }
960
961 /* Called from IO thread */
962 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
963 pa_usec_t usec = 0;
964 pa_msgobject *o;
965
966 pa_sink_assert_ref(s);
967 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
968
969 /* The returned value is supposed to be in the time domain of the sound card! */
970
971 if (s->thread_info.state == PA_SINK_SUSPENDED)
972 return 0;
973
974 if (!(s->flags & PA_SINK_LATENCY))
975 return 0;
976
977 o = PA_MSGOBJECT(s);
978
979 /* We probably should make this a proper vtable callback instead of going through process_msg() */
980
981 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
982 return -1;
983
984 return usec;
985 }
986
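/* Called from main thread. Derives the soft volume a sink input needs so
 * that its effective volume matches its virtual volume relative to the
 * (remapped) new sink volume. */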
987 static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
988 unsigned c;
989
990 pa_sink_input_assert_ref(i);
991 pa_assert(new_volume->channels == i->sample_spec.channels);
992
993 /* This basically calculates i->soft_volume := i->virtual_volume / new_volume * i->volume_factor */
994
995 /* The new sink volume passed in here must already be remapped to
996 * the sink input's channel map! */
997
998 for (c = 0; c < i->sample_spec.channels; c++)
999
1000 if (new_volume->values[c] <= PA_VOLUME_MUTED)
1001 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1002 else
1003 i->soft_volume.values[c] = pa_sw_volume_from_linear(
1004 pa_sw_volume_to_linear(i->virtual_volume.values[c]) *
1005 pa_sw_volume_to_linear(i->volume_factor.values[c]) /
1006 pa_sw_volume_to_linear(new_volume->values[c]));
1007
1008 i->soft_volume.channels = i->sample_spec.channels;
1009
1010 /* Hooks have the ability to play games with i->soft_volume */
1011 pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);
1012
1013 /* We don't copy the soft_volume to the thread_info data
1014 * here. That must be done by the caller */
1015 }
1016
1017 /* Called from main thread */
1018 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1019 pa_sink_input *i;
1020 uint32_t idx;
1021
1022 pa_sink_assert_ref(s);
1023 pa_assert(new_volume);
1024 pa_assert(PA_SINK_IS_LINKED(s->state));
1025 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1026
1027 /* This is called whenever a sink input volume changes and we
1028 * might need to fix up the sink volume accordingly. Please note
1029      * that we don't actually update the sink's volume here, we only
1030 * return how it needs to be updated. The caller should then call
1031 * pa_sink_set_volume().*/
1032
1033 if (pa_idxset_isempty(s->inputs)) {
1034 /* In the special case that we have no sink input we leave the
1035 * volume unmodified. */
1036 *new_volume = s->virtual_volume;
1037 return;
1038 }
1039
1040 pa_cvolume_mute(new_volume, s->channel_map.channels);
1041
1042 /* First let's determine the new maximum volume of all inputs
1043 * connected to this sink */
1044 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1045 unsigned c;
1046 pa_cvolume remapped_volume;
1047
1048 remapped_volume = i->virtual_volume;
1049 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1050
1051 for (c = 0; c < new_volume->channels; c++)
1052 if (remapped_volume.values[c] > new_volume->values[c])
1053 new_volume->values[c] = remapped_volume.values[c];
1054 }
1055
1056 /* Then, let's update the soft volumes of all inputs connected
1057 * to this sink */
1058 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1059 pa_cvolume remapped_new_volume;
1060
1061 remapped_new_volume = *new_volume;
1062 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1063 compute_new_soft_volume(i, &remapped_new_volume);
1064
1065 /* We don't copy soft_volume to the thread_info data here
1066 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1067          * want the update to be atomic with the sink volume
1068 * update, hence we do it within the pa_sink_set_volume() call
1069 * below */
1070 }
1071 }
1072
1073 /* Called from main thread */
1074 void pa_sink_propagate_flat_volume(pa_sink *s, const pa_cvolume *old_volume) {
1075 pa_sink_input *i;
1076 uint32_t idx;
1077
1078 pa_sink_assert_ref(s);
1079 pa_assert(old_volume);
1080 pa_assert(PA_SINK_IS_LINKED(s->state));
1081 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1082
1083     /* This is called whenever the sink volume changes in a way that is not
1084 * caused by a sink input volume change. We need to fix up the
1085 * sink input volumes accordingly */
1086
1087 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1088 pa_cvolume remapped_old_volume, remapped_new_volume, new_virtual_volume;
1089 unsigned c;
1090
1091 /* This basically calculates i->virtual_volume := i->virtual_volume * s->virtual_volume / old_volume */
1092
1093 remapped_new_volume = s->virtual_volume;
1094 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1095
1096 remapped_old_volume = *old_volume;
1097 pa_cvolume_remap(&remapped_old_volume, &s->channel_map, &i->channel_map);
1098
1099 for (c = 0; c < i->sample_spec.channels; c++)
1100
1101 if (remapped_old_volume.values[c] <= PA_VOLUME_MUTED)
1102 new_virtual_volume.values[c] = remapped_new_volume.values[c];
1103 else
1104 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1105 pa_sw_volume_to_linear(i->virtual_volume.values[c]) *
1106 pa_sw_volume_to_linear(remapped_new_volume.values[c]) /
1107 pa_sw_volume_to_linear(remapped_old_volume.values[c]));
1108
1109 new_virtual_volume.channels = i->sample_spec.channels;
1110
1111 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1112 i->virtual_volume = new_virtual_volume;
1113
1114 /* Hmm, the soft volume might no longer actually match
1115 * what has been chosen as new virtual volume here,
1116 * especially when the old volume was
1117 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1118 * volumes here. */
1119 compute_new_soft_volume(i, &remapped_new_volume);
1120
1121 /* The virtual volume changed, let's tell people so */
1122 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1123 }
1124 }
1125
1126 /* If the soft_volume of any of the sink inputs got changed, let's
1127 * make sure the thread copies are synced up. */
1128 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
1129 }
1130
1131 /* Called from main thread */
1132 void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg) {
1133 pa_cvolume old_virtual_volume;
1134 pa_bool_t virtual_volume_changed;
1135
1136 pa_sink_assert_ref(s);
1137 pa_assert(PA_SINK_IS_LINKED(s->state));
1138 pa_assert(volume);
1139 pa_assert(pa_cvolume_valid(volume));
1140 pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));
1141
1142 old_virtual_volume = s->virtual_volume;
1143 s->virtual_volume = *volume;
1144 virtual_volume_changed = !pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume);
1145
1146 /* Propagate this volume change back to the inputs */
1147 if (virtual_volume_changed)
1148 if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
1149 pa_sink_propagate_flat_volume(s, &old_virtual_volume);
1150
1151 if (s->set_volume) {
1152 /* If we have a function set_volume(), then we do not apply a
1153          * soft volume by default. However, set_volume() is free to apply one
1154 * to s->soft_volume */
1155
1156 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1157 s->set_volume(s);
1158
1159 } else
1160 /* If we have no function set_volume(), then the soft volume
1161 * becomes the virtual volume */
1162 s->soft_volume = s->virtual_volume;
1163
1164 /* This tells the sink that soft and/or virtual volume changed */
1165 if (sendmsg)
1166 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1167
1168 if (virtual_volume_changed)
1169 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1170 }
1171
1172 /* Called from main thread. Only to be called by sink implementor */
1173 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1174 pa_sink_assert_ref(s);
1175 pa_assert(volume);
1176
1177 s->soft_volume = *volume;
1178
1179 if (PA_SINK_IS_LINKED(s->state))
1180 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1181 else
1182 s->thread_info.soft_volume = *volume;
1183 }
1184
1185 /* Called from main thread */
1186 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1187 pa_sink_assert_ref(s);
1188
1189 if (s->refresh_volume || force_refresh) {
1190 struct pa_cvolume old_virtual_volume = s->virtual_volume;
1191
1192 if (s->get_volume)
1193 s->get_volume(s);
1194
1195 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1196
1197 if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {
1198
1199 if (s->flags & PA_SINK_FLAT_VOLUME)
1200 pa_sink_propagate_flat_volume(s, &old_virtual_volume);
1201
1202 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1203 }
1204 }
1205
1206 return &s->virtual_volume;
1207 }
1208
1209 /* Called from main thread */
1210 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume) {
1211 pa_sink_assert_ref(s);
1212
1213 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1214
1215 if (pa_cvolume_equal(&s->virtual_volume, new_volume))
1216 return;
1217
1218 s->virtual_volume = *new_volume;
1219 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1220 }
1221
1222 /* Called from main thread */
1223 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute) {
1224 pa_bool_t old_muted;
1225
1226 pa_sink_assert_ref(s);
1227 pa_assert(PA_SINK_IS_LINKED(s->state));
1228
1229 old_muted = s->muted;
1230 s->muted = mute;
1231
1232 if (s->set_mute)
1233 s->set_mute(s);
1234
1235 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1236
1237 if (old_muted != s->muted)
1238 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1239 }
1240
1241 /* Called from main thread */
1242 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1243
1244 pa_sink_assert_ref(s);
1245
1246 if (s->refresh_muted || force_refresh) {
1247 pa_bool_t old_muted = s->muted;
1248
1249 if (s->get_mute)
1250 s->get_mute(s);
1251
1252 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1253
1254 if (old_muted != s->muted)
1255 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1256 }
1257
1258 return s->muted;
1259 }
1260
1261 /* Called from main thread */
1262 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1263 pa_sink_assert_ref(s);
1264
1265     /* The sink implementor may call this if the mute status changed to make sure everyone is notified */
1266
1267 if (s->muted == new_muted)
1268 return;
1269
1270 s->muted = new_muted;
1271 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1272 }
1273
1274 /* Called from main thread */
1275 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1276 pa_sink_assert_ref(s);
1277
1278 if (p)
1279 pa_proplist_update(s->proplist, mode, p);
1280
1281 if (PA_SINK_IS_LINKED(s->state)) {
1282 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1283 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1284 }
1285
1286 return TRUE;
1287 }
1288
1289 /* Called from main thread */
1290 void pa_sink_set_description(pa_sink *s, const char *description) {
1291 const char *old;
1292 pa_sink_assert_ref(s);
1293
1294 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1295 return;
1296
1297 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1298
1299 if (old && description && !strcmp(old, description))
1300 return;
1301
1302 if (description)
1303 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1304 else
1305 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1306
1307 if (s->monitor_source) {
1308 char *n;
1309
1310 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1311 pa_source_set_description(s->monitor_source, n);
1312 pa_xfree(n);
1313 }
1314
1315 if (PA_SINK_IS_LINKED(s->state)) {
1316 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1317 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1318 }
1319 }
1320
1321 /* Called from main thread */
1322 unsigned pa_sink_linked_by(pa_sink *s) {
1323 unsigned ret;
1324
1325 pa_sink_assert_ref(s);
1326 pa_assert(PA_SINK_IS_LINKED(s->state));
1327
1328 ret = pa_idxset_size(s->inputs);
1329
1330 /* We add in the number of streams connected to us here. Please
1331      * note the asymmetry to pa_sink_used_by()! */
1332
1333 if (s->monitor_source)
1334 ret += pa_source_linked_by(s->monitor_source);
1335
1336 return ret;
1337 }
1338
1339 /* Called from main thread */
1340 unsigned pa_sink_used_by(pa_sink *s) {
1341 unsigned ret;
1342
1343 pa_sink_assert_ref(s);
1344 pa_assert(PA_SINK_IS_LINKED(s->state));
1345
1346 ret = pa_idxset_size(s->inputs);
1347 pa_assert(ret >= s->n_corked);
1348
1349 /* Streams connected to our monitor source do not matter for
1350      * pa_sink_used_by()! */
1351
1352 return ret - s->n_corked;
1353 }
1354
1355 /* Called from main thread */
1356 unsigned pa_sink_check_suspend(pa_sink *s) {
1357 unsigned ret;
1358 pa_sink_input *i;
1359 uint32_t idx;
1360
1361 pa_sink_assert_ref(s);
1362
1363 if (!PA_SINK_IS_LINKED(s->state))
1364 return 0;
1365
1366 ret = 0;
1367
1368 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1369 pa_sink_input_state_t st;
1370
1371 st = pa_sink_input_get_state(i);
1372 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1373
1374 if (st == PA_SINK_INPUT_CORKED)
1375 continue;
1376
1377 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1378 continue;
1379
1380 ret ++;
1381 }
1382
1383 if (s->monitor_source)
1384 ret += pa_source_check_suspend(s->monitor_source);
1385
1386 return ret;
1387 }
1388
1389 /* Called from the IO thread */
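/* Copies each input's soft_volume into its thread_info counterpart and
 * requests a rewind for every input whose volume actually changed. */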
1390 static void sync_input_volumes_within_thread(pa_sink *s) {
1391 pa_sink_input *i;
1392 void *state = NULL;
1393
1394 pa_sink_assert_ref(s);
1395
1396 while ((i = PA_SINK_INPUT(pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))) {
1397 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1398 continue;
1399
1400 i->thread_info.soft_volume = i->soft_volume;
1401 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1402 }
1403 }
1404
1405 /* Called from IO thread, except when it is not */
1406 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1407 pa_sink *s = PA_SINK(o);
1408 pa_sink_assert_ref(s);
1409
1410 switch ((pa_sink_message_t) code) {
1411
1412 case PA_SINK_MESSAGE_ADD_INPUT: {
1413 pa_sink_input *i = PA_SINK_INPUT(userdata);
1414
1415 /* If you change anything here, make sure to change the
1416 * sink input handling a few lines down at
1417 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1418
1419 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1420
1421 /* Since the caller sleeps in pa_sink_input_put(), we can
1422 * safely access data outside of thread_info even though
1423 * it is mutable */
1424
1425 if ((i->thread_info.sync_prev = i->sync_prev)) {
1426 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1427 pa_assert(i->sync_prev->sync_next == i);
1428 i->thread_info.sync_prev->thread_info.sync_next = i;
1429 }
1430
1431 if ((i->thread_info.sync_next = i->sync_next)) {
1432 pa_assert(i->sink == i->thread_info.sync_next->sink);
1433 pa_assert(i->sync_next->sync_prev == i);
1434 i->thread_info.sync_next->thread_info.sync_prev = i;
1435 }
1436
1437 pa_assert(!i->thread_info.attached);
1438 i->thread_info.attached = TRUE;
1439
1440 if (i->attach)
1441 i->attach(i);
1442
1443 pa_sink_input_set_state_within_thread(i, i->state);
1444
1445 /* The requested latency of the sink input needs to be
1446 * fixed up and then configured on the sink */
1447
1448 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1449 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1450
1451 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1452 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1453
1454 /* We don't rewind here automatically. This is left to the
1455 * sink input implementor because some sink inputs need a
1456 * slow start, i.e. need some time to buffer client
1457 * samples before beginning streaming. */
1458
1459 /* In flat volume mode we need to update the volume as
1460 * well */
1461 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1462 }
1463
1464 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1465 pa_sink_input *i = PA_SINK_INPUT(userdata);
1466
1467 /* If you change anything here, make sure to change the
1468 * sink input handling a few lines down at
1469              * PA_SINK_MESSAGE_START_MOVE, too. */
1470
1471 if (i->detach)
1472 i->detach(i);
1473
1474 pa_sink_input_set_state_within_thread(i, i->state);
1475
1476 pa_assert(i->thread_info.attached);
1477 i->thread_info.attached = FALSE;
1478
1479 /* Since the caller sleeps in pa_sink_input_unlink(),
1480 * we can safely access data outside of thread_info even
1481 * though it is mutable */
1482
1483 pa_assert(!i->sync_prev);
1484 pa_assert(!i->sync_next);
1485
1486 if (i->thread_info.sync_prev) {
1487 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1488 i->thread_info.sync_prev = NULL;
1489 }
1490
1491 if (i->thread_info.sync_next) {
1492 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1493 i->thread_info.sync_next = NULL;
1494 }
1495
1496 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1497 pa_sink_input_unref(i);
1498
1499 pa_sink_invalidate_requested_latency(s);
1500 pa_sink_request_rewind(s, (size_t) -1);
1501
1502 /* In flat volume mode we need to update the volume as
1503 * well */
1504 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1505 }
1506
1507 case PA_SINK_MESSAGE_START_MOVE: {
1508 pa_sink_input *i = PA_SINK_INPUT(userdata);
1509
1510 /* We don't support moving synchronized streams. */
1511 pa_assert(!i->sync_prev);
1512 pa_assert(!i->sync_next);
1513 pa_assert(!i->thread_info.sync_next);
1514 pa_assert(!i->thread_info.sync_prev);
1515
1516 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1517 pa_usec_t usec = 0;
1518 size_t sink_nbytes, total_nbytes;
1519
1520 /* Get the latency of the sink */
1521 if (!(s->flags & PA_SINK_LATENCY) ||
1522 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1523 usec = 0;
1524
1525 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1526 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1527
1528 if (total_nbytes > 0) {
1529 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1530 i->thread_info.rewrite_flush = TRUE;
1531 pa_sink_input_process_rewind(i, sink_nbytes);
1532 }
1533 }
1534
1535 if (i->detach)
1536 i->detach(i);
1537
1538 pa_assert(i->thread_info.attached);
1539 i->thread_info.attached = FALSE;
1540
1541 /* Let's remove the sink input ...*/
1542 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1543 pa_sink_input_unref(i);
1544
1545 pa_sink_invalidate_requested_latency(s);
1546
1547 pa_log_debug("Requesting rewind due to started move");
1548 pa_sink_request_rewind(s, (size_t) -1);
1549
1550 /* In flat volume mode we need to update the volume as
1551 * well */
1552 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1553 }
1554
1555 case PA_SINK_MESSAGE_FINISH_MOVE: {
1556 pa_sink_input *i = PA_SINK_INPUT(userdata);
1557
1558 /* We don't support moving synchronized streams. */
1559 pa_assert(!i->sync_prev);
1560 pa_assert(!i->sync_next);
1561 pa_assert(!i->thread_info.sync_next);
1562 pa_assert(!i->thread_info.sync_prev);
1563
1564 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1565
1566 pa_assert(!i->thread_info.attached);
1567 i->thread_info.attached = TRUE;
1568
1569 if (i->attach)
1570 i->attach(i);
1571
1572 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1573 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1574
1575 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1576 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1577
1578 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1579 pa_usec_t usec = 0;
1580 size_t nbytes;
1581
1582 /* Get the latency of the sink */
1583 if (!(s->flags & PA_SINK_LATENCY) ||
1584 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1585 usec = 0;
1586
1587 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1588
1589 if (nbytes > 0)
1590 pa_sink_input_drop(i, nbytes);
1591
1592 pa_log_debug("Requesting rewind due to finished move");
1593 pa_sink_request_rewind(s, nbytes);
1594 }
1595
1596 /* In flat volume mode we need to update the volume as
1597 * well */
1598 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1599 }
1600
1601 case PA_SINK_MESSAGE_SET_VOLUME:
1602
1603 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1604 s->thread_info.soft_volume = s->soft_volume;
1605 pa_sink_request_rewind(s, (size_t) -1);
1606 }
1607
1608 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1609 return 0;
1610
1611 /* Fall through ... */
1612
1613 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1614 sync_input_volumes_within_thread(s);
1615 return 0;
1616
1617 case PA_SINK_MESSAGE_GET_VOLUME:
1618 return 0;
1619
1620 case PA_SINK_MESSAGE_SET_MUTE:
1621
1622 if (s->thread_info.soft_muted != s->muted) {
1623 s->thread_info.soft_muted = s->muted;
1624 pa_sink_request_rewind(s, (size_t) -1);
1625 }
1626
1627 return 0;
1628
1629 case PA_SINK_MESSAGE_GET_MUTE:
1630 return 0;
1631
1632 case PA_SINK_MESSAGE_SET_STATE: {
1633
1634 pa_bool_t suspend_change =
1635 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1636 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1637
1638 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1639
1640 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1641 s->thread_info.rewind_nbytes = 0;
1642 s->thread_info.rewind_requested = FALSE;
1643 }
1644
1645 if (suspend_change) {
1646 pa_sink_input *i;
1647 void *state = NULL;
1648
1649 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1650 if (i->suspend_within_thread)
1651 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1652 }
1653
1654 return 0;
1655 }
1656
1657 case PA_SINK_MESSAGE_DETACH:
1658
1659 /* Detach all streams */
1660 pa_sink_detach_within_thread(s);
1661 return 0;
1662
1663 case PA_SINK_MESSAGE_ATTACH:
1664
1665 /* Reattach all streams */
1666 pa_sink_attach_within_thread(s);
1667 return 0;
1668
1669 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
1670
1671 pa_usec_t *usec = userdata;
1672 *usec = pa_sink_get_requested_latency_within_thread(s);
1673
1674 if (*usec == (pa_usec_t) -1)
1675 *usec = s->thread_info.max_latency;
1676
1677 return 0;
1678 }
1679
1680 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
1681 pa_usec_t *r = userdata;
1682
1683 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
1684
1685 return 0;
1686 }
1687
1688 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
1689 pa_usec_t *r = userdata;
1690
1691 r[0] = s->thread_info.min_latency;
1692 r[1] = s->thread_info.max_latency;
1693
1694 return 0;
1695 }
1696
1697 case PA_SINK_MESSAGE_GET_MAX_REWIND:
1698
1699 *((size_t*) userdata) = s->thread_info.max_rewind;
1700 return 0;
1701
1702 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
1703
1704 *((size_t*) userdata) = s->thread_info.max_request;
1705 return 0;
1706
1707 case PA_SINK_MESSAGE_SET_MAX_REWIND:
1708
1709 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
1710 return 0;
1711
1712 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
1713
1714 pa_sink_set_max_request_within_thread(s, (size_t) offset);
1715 return 0;
1716
1717 case PA_SINK_MESSAGE_GET_LATENCY:
1718 case PA_SINK_MESSAGE_MAX:
1719 ;
1720 }
1721
1722 return -1;
1723 }
1724
1725 /* Called from main thread */
1726 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend) {
1727 pa_sink *sink;
1728 uint32_t idx;
1729 int ret = 0;
1730
1731 pa_core_assert_ref(c);
1732
1733 for (sink = PA_SINK(pa_idxset_first(c->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(c->sinks, &idx))) {
1734 int r;
1735
1736 if ((r = pa_sink_suspend(sink, suspend)) < 0)
1737 ret = r;
1738 }
1739
1740 return ret;
1741 }
1742
1743 /* Called from main thread */
1744 void pa_sink_detach(pa_sink *s) {
1745 pa_sink_assert_ref(s);
1746 pa_assert(PA_SINK_IS_LINKED(s->state));
1747
1748 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
1749 }
1750
1751 /* Called from main thread */
1752 void pa_sink_attach(pa_sink *s) {
1753 pa_sink_assert_ref(s);
1754 pa_assert(PA_SINK_IS_LINKED(s->state));
1755
1756 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
1757 }
1758
1759 /* Called from IO thread */
1760 void pa_sink_detach_within_thread(pa_sink *s) {
1761 pa_sink_input *i;
1762 void *state = NULL;
1763
1764 pa_sink_assert_ref(s);
1765 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1766
1767 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1768 if (i->detach)
1769 i->detach(i);
1770
1771 if (s->monitor_source)
1772 pa_source_detach_within_thread(s->monitor_source);
1773 }
1774
1775 /* Called from IO thread */
1776 void pa_sink_attach_within_thread(pa_sink *s) {
1777 pa_sink_input *i;
1778 void *state = NULL;
1779
1780 pa_sink_assert_ref(s);
1781 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1782
1783 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1784 if (i->attach)
1785 i->attach(i);
1786
1787 if (s->monitor_source)
1788 pa_source_attach_within_thread(s->monitor_source);
1789 }
1790
1791 /* Called from IO thread */
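/* Records a rewind request, clamped to max_rewind ((size_t) -1 selects the
 * full max_rewind), and notifies the implementor via request_rewind(). The
 * rewind itself is carried out later in pa_sink_process_rewind(). */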
1792 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
1793 pa_sink_assert_ref(s);
1794 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1795
1796 if (s->thread_info.state == PA_SINK_SUSPENDED)
1797 return;
1798
1799 if (nbytes == (size_t) -1)
1800 nbytes = s->thread_info.max_rewind;
1801
1802 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
1803
1804 if (s->thread_info.rewind_requested &&
1805 nbytes <= s->thread_info.rewind_nbytes)
1806 return;
1807
1808 s->thread_info.rewind_nbytes = nbytes;
1809 s->thread_info.rewind_requested = TRUE;
1810
1811 if (s->request_rewind)
1812 s->request_rewind(s);
1813 }
1814
1815 /* Called from IO thread */
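/* Returns the smallest latency requested by any sink input or the monitor
 * source, clamped to [min_latency, max_latency], or (pa_usec_t) -1 if nobody
 * expressed a preference. The value is cached until
 * pa_sink_invalidate_requested_latency() is called. */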
1816 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
1817 pa_usec_t result = (pa_usec_t) -1;
1818 pa_sink_input *i;
1819 void *state = NULL;
1820 pa_usec_t monitor_latency;
1821
1822 pa_sink_assert_ref(s);
1823
1824 if (s->thread_info.requested_latency_valid)
1825 return s->thread_info.requested_latency;
1826
1827 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1828
1829 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
1830 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
1831 result = i->thread_info.requested_sink_latency;
1832
1833 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
1834
1835 if (monitor_latency != (pa_usec_t) -1 &&
1836 (result == (pa_usec_t) -1 || result > monitor_latency))
1837 result = monitor_latency;
1838
1839 if (result != (pa_usec_t) -1) {
1840 if (result > s->thread_info.max_latency)
1841 result = s->thread_info.max_latency;
1842
1843 if (result < s->thread_info.min_latency)
1844 result = s->thread_info.min_latency;
1845 }
1846
1847 s->thread_info.requested_latency = result;
1848 s->thread_info.requested_latency_valid = TRUE;
1849
1850 return result;
1851 }
1852
1853 /* Called from main thread */
1854 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
1855 pa_usec_t usec = 0;
1856
1857 pa_sink_assert_ref(s);
1858 pa_assert(PA_SINK_IS_LINKED(s->state));
1859
1860 if (s->state == PA_SINK_SUSPENDED)
1861 return 0;
1862
1863 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
1864 return usec;
1865 }
1866
1867 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
1868 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
1869 pa_sink_input *i;
1870 void *state = NULL;
1871
1872 pa_sink_assert_ref(s);
1873
1874 if (max_rewind == s->thread_info.max_rewind)
1875 return;
1876
1877 s->thread_info.max_rewind = max_rewind;
1878
1879 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1880 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1881 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1882 }
1883
1884 if (s->monitor_source)
1885 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
1886 }
1887
1888 /* Called from main thread */
1889 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
1890 pa_sink_assert_ref(s);
1891
1892 if (PA_SINK_IS_LINKED(s->state))
1893 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
1894 else
1895 pa_sink_set_max_rewind_within_thread(s, max_rewind);
1896 }
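
/* Usage sketch (illustrative, hypothetical variable names): a driver typically
 * advertises its hardware buffer size here so streams know how far they may be
 * rewound; the value is in bytes of the sink's sample spec.
 *
 *     pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
 */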
1897
1898 /* Called from IO as well as the main thread -- the latter only before the IO thread has started up */
1899 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
1900 void *state = NULL;
1901
1902 pa_sink_assert_ref(s);
1903
1904 if (max_request == s->thread_info.max_request)
1905 return;
1906
1907 s->thread_info.max_request = max_request;
1908
1909 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1910 pa_sink_input *i;
1911
1912 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1913 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1914 }
1915 }
1916
1917 /* Called from main thread */
1918 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
1919 pa_sink_assert_ref(s);
1920
1921 if (PA_SINK_IS_LINKED(s->state))
1922 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
1923 else
1924 pa_sink_set_max_request_within_thread(s, max_request);
1925 }
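
/* Usage sketch (illustrative, hypothetical variable names): max_request bounds
 * how much data the sink will ask from its inputs in one render pass; drivers
 * commonly derive it from their buffer/watermark configuration.
 *
 *     pa_sink_set_max_request(u->sink, u->max_request_bytes);
 */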
1926
1927 /* Called from IO thread */
1928 void pa_sink_invalidate_requested_latency(pa_sink *s) {
1929 pa_sink_input *i;
1930 void *state = NULL;
1931
1932 pa_sink_assert_ref(s);
1933
1934 s->thread_info.requested_latency_valid = FALSE;
1935
1936 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1937
1938 if (s->update_requested_latency)
1939 s->update_requested_latency(s);
1940
1941 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1942 if (i->update_sink_requested_latency)
1943 i->update_sink_requested_latency(i);
1944 }
1945 }
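
/* Editorial note: anything that can change the outcome of
 * pa_sink_get_requested_latency_within_thread() -- an input's latency request
 * or the sink's latency range -- should call this, so the cached value is
 * recomputed on next use and the sink's and the inputs' update hooks run. */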
1946
1947 /* Called from main thread */
1948 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
1949 pa_sink_assert_ref(s);
1950
1951     /* min_latency == 0: no lower limit requested, use ABSOLUTE_MIN_LATENCY
1952      * min_latency anything else: use the specified limit (clamped below)
1953      *
1954      * The same applies to max_latency and ABSOLUTE_MAX_LATENCY */
1955
1956 if (min_latency < ABSOLUTE_MIN_LATENCY)
1957 min_latency = ABSOLUTE_MIN_LATENCY;
1958
1959     if (max_latency == 0 ||
1960 max_latency > ABSOLUTE_MAX_LATENCY)
1961 max_latency = ABSOLUTE_MAX_LATENCY;
1962
1963 pa_assert(min_latency <= max_latency);
1964
1965 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
1966 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
1967 max_latency == ABSOLUTE_MAX_LATENCY) ||
1968 (s->flags & PA_SINK_DYNAMIC_LATENCY));
1969
1970 if (PA_SINK_IS_LINKED(s->state)) {
1971 pa_usec_t r[2];
1972
1973 r[0] = min_latency;
1974 r[1] = max_latency;
1975
1976 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
1977 } else
1978 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
1979 }
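
/* Usage sketch (illustrative, hypothetical values): a driver that supports
 * PA_SINK_DYNAMIC_LATENCY advertises its usable range once, usually from the
 * main thread at initialization; values are in microseconds and are clamped
 * to the absolute bounds above.
 *
 *     pa_sink_set_latency_range(u->sink, 5*PA_USEC_PER_MSEC, 2*PA_USEC_PER_SEC);
 */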
1980
1981 /* Called from main thread */
1982 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
1983 pa_sink_assert_ref(s);
1984 pa_assert(min_latency);
1985 pa_assert(max_latency);
1986
1987 if (PA_SINK_IS_LINKED(s->state)) {
1988 pa_usec_t r[2] = { 0, 0 };
1989
1990 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
1991
1992 *min_latency = r[0];
1993 *max_latency = r[1];
1994 } else {
1995 *min_latency = s->thread_info.min_latency;
1996 *max_latency = s->thread_info.max_latency;
1997 }
1998 }
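
/* Sketch of the matching read side (illustrative):
 *
 *     pa_usec_t lo, hi;
 *     pa_sink_get_latency_range(s, &lo, &hi);  // main thread
 */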
1999
2000 /* Called from IO thread */
2001 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2002 void *state = NULL;
2003
2004 pa_sink_assert_ref(s);
2005
2006 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2007 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2008 pa_assert(min_latency <= max_latency);
2009
2010 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2011 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2012 max_latency == ABSOLUTE_MAX_LATENCY) ||
2013 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2014
2015 s->thread_info.min_latency = min_latency;
2016 s->thread_info.max_latency = max_latency;
2017
2018 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2019 pa_sink_input *i;
2020
2021 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2022 if (i->update_sink_latency_range)
2023 i->update_sink_latency_range(i);
2024 }
2025
2026 pa_sink_invalidate_requested_latency(s);
2027
2028 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2029 }
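
/* Editorial note: this IO-thread counterpart stores the validated range,
 * notifies every input via its update_sink_latency_range() hook, invalidates
 * the cached requested latency and propagates the same range to the monitor
 * source. */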
2030
2031 /* Called from main context */
2032 size_t pa_sink_get_max_rewind(pa_sink *s) {
2033 size_t r;
2034 pa_sink_assert_ref(s);
2035
2036 if (!PA_SINK_IS_LINKED(s->state))
2037 return s->thread_info.max_rewind;
2038
2039 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2040
2041 return r;
2042 }
2043
2044 /* Called from main context */
2045 size_t pa_sink_get_max_request(pa_sink *s) {
2046 size_t r;
2047 pa_sink_assert_ref(s);
2048
2049 if (!PA_SINK_IS_LINKED(s->state))
2050 return s->thread_info.max_request;
2051
2052 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2053
2054 return r;
2055 }
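
/* Editorial note: pa_sink_get_max_rewind() and pa_sink_get_max_request() read
 * thread_info directly while the sink is not yet linked (no IO thread running)
 * and switch to the synchronous message path afterwards. */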
2056
2057 /* Called from main context */
2058 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2059 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2060
2061 pa_assert(p);
2062
2063 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2064 return TRUE;
2065
2066 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2067
2068 if (pa_streq(ff, "microphone"))
2069 t = "audio-input-microphone";
2070 else if (pa_streq(ff, "webcam"))
2071 t = "camera-web";
2072 else if (pa_streq(ff, "computer"))
2073 t = "computer";
2074 else if (pa_streq(ff, "handset"))
2075 t = "phone";
2076 else if (pa_streq(ff, "portable"))
2077 t = "multimedia-player";
2078 else if (pa_streq(ff, "tv"))
2079 t = "video-display";
2080 }
2081
2082 if (!t)
2083 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2084 if (pa_streq(c, "modem"))
2085 t = "modem";
2086
2087 if (!t) {
2088 if (is_sink)
2089 t = "audio-card";
2090 else
2091 t = "audio-input-microphone";
2092 }
2093
2094 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2095 if (strstr(profile, "analog"))
2096 s = "-analog";
2097 else if (strstr(profile, "iec958"))
2098 s = "-iec958";
2099 else if (strstr(profile, "hdmi"))
2100 s = "-hdmi";
2101 }
2102
2103 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2104
2105 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2106
2107 return TRUE;
2108 }
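
/* Example (editorial): for a sink whose proplist carries no matching form
 * factor or device class, PA_PROP_DEVICE_PROFILE_NAME = "analog-stereo" and
 * PA_PROP_DEVICE_BUS = "usb", the code above composes the icon name
 * "audio-card-analog-usb". Typical call from a device module (hypothetical
 * "data" holding the new-data proplist):
 *
 *     pa_device_init_icon(data.proplist, TRUE);  // TRUE: device is a sink
 */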
2109
2110 pa_bool_t pa_device_init_description(pa_proplist *p) {
2111 const char *s;
2112 pa_assert(p);
2113
2114 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2115 return TRUE;
2116
2117 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2118 if (pa_streq(s, "internal")) {
2119 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Internal Audio"));
2120 return TRUE;
2121 }
2122
2123 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2124 if (pa_streq(s, "modem")) {
2125 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Modem"));
2126 return TRUE;
2127 }
2128
2129 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME))) {
2130 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, s);
2131 return TRUE;
2132 }
2133
2134 return FALSE;
2135 }
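
/* Editorial note: pa_device_init_description() fills in a human-readable
 * description only when one can be derived -- form factor "internal" maps to
 * "Internal Audio", device class "modem" to "Modem", otherwise the product
 * name is used verbatim -- and returns FALSE so the caller can fall back to
 * its own naming when none of these properties are set. */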