/* (Scraped-page header preserved as a comment so the file compiles:
 *  code.delx.au - pulseaudio/blob - src/pulsecore/sink.c
 *  commit: "move flat volume logic into the core. while doing so add
 *  n_volume_steps field to..."
 *  [pulseaudio] / src / pulsecore / sink.c) */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36
37 #include <pulsecore/sink-input.h>
38 #include <pulsecore/namereg.h>
39 #include <pulsecore/core-util.h>
40 #include <pulsecore/sample-util.h>
41 #include <pulsecore/core-subscribe.h>
42 #include <pulsecore/log.h>
43 #include <pulsecore/macro.h>
44 #include <pulsecore/play-memblockq.h>
45
46 #include "sink.h"
47
48 #define MAX_MIX_CHANNELS 32
49 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
50 #define DEFAULT_MIN_LATENCY (4*PA_USEC_PER_MSEC)
51
52 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
53
54 static void sink_free(pa_object *s);
55
56 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
57 pa_assert(data);
58
59 memset(data, 0, sizeof(*data));
60 data->proplist = pa_proplist_new();
61
62 return data;
63 }
64
65 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
66 pa_assert(data);
67
68 pa_xfree(data->name);
69 data->name = pa_xstrdup(name);
70 }
71
72 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
73 pa_assert(data);
74
75 if ((data->sample_spec_is_set = !!spec))
76 data->sample_spec = *spec;
77 }
78
79 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
80 pa_assert(data);
81
82 if ((data->channel_map_is_set = !!map))
83 data->channel_map = *map;
84 }
85
86 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
87 pa_assert(data);
88
89 if ((data->volume_is_set = !!volume))
90 data->volume = *volume;
91 }
92
93 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
94 pa_assert(data);
95
96 data->muted_is_set = TRUE;
97 data->muted = !!mute;
98 }
99
100 void pa_sink_new_data_done(pa_sink_new_data *data) {
101 pa_assert(data);
102
103 pa_xfree(data->name);
104 pa_proplist_free(data->proplist);
105 }
106
107 /* Called from main context */
108 static void reset_callbacks(pa_sink *s) {
109 pa_assert(s);
110
111 s->set_state = NULL;
112 s->get_volume = NULL;
113 s->set_volume = NULL;
114 s->get_mute = NULL;
115 s->set_mute = NULL;
116 s->request_rewind = NULL;
117 s->update_requested_latency = NULL;
118 }
119
120 /* Called from main context */
/* Called from main context.
 *
 * Allocate and partially initialize a new sink from the construction
 * data in *data.  On success the sink is registered in the name
 * registry and in core->sinks, a monitor source named "<name>.monitor"
 * is created for it, and the sink is returned in PA_SINK_INIT state;
 * the implementor completes setup and then calls pa_sink_put().
 * Returns NULL on failure.
 *
 * NOTE(review): the pa_return_null_if_fail() validation paths below
 * return without freeing 's' or unregistering 'name' -- looks like a
 * leak on those error paths; confirm against upstream. */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);

    s = pa_msgobject_new(pa_sink);

    /* Reserve the sink name; the registry may normalize/uniquify it. */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    /* Give modules a chance to veto or adjust the new sink. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* Validate the (possibly hook-modified) construction data. */
    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Fill in defaults for anything the caller left unset. */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    if (!data->volume_is_set)
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit properties from the owning card, if any. */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    /* Last chance for modules to adjust the data before we copy it. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;

    /* Volume state: virtual_volume is what clients see; soft_volume is
     * the factor applied in software on top of any hardware volume. */
    s->virtual_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    /* The implementor must supply these before pa_sink_put(). */
    s->asyncmsgq = NULL;
    s->rtpoll = NULL;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* Mirror of the main-thread state for use from the IO thread. */
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = DEFAULT_MIN_LATENCY;
    s->thread_info.max_latency = 0;

    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map));

    /* Every sink gets a monitor source that taps what it plays back. */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data, PA_SOURCE_LATENCY);

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        /* Sink is already fully registered; tear it down the long way. */
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
271
272 /* Called from main context */
/* Called from main context.
 *
 * Transition the sink to 'state': the implementor's set_state()
 * callback is consulted first, then the IO thread is informed
 * synchronously.  If the IO thread refuses, the implementor callback
 * is rolled back to the original state.  Fires state-change hooks and
 * subscription events unless we are entering UNLINKED (pa_sink_unlink()
 * handles the announcements for that case).  Returns 0 on success,
 * a negative error code otherwise. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* Does this transition cross the suspended <-> opened boundary? */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* IO thread rejected the change: roll the implementor back. */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx)))
            if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);
    }

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    return 0;
}
322
323 /* Called from main context */
/* Called from main context.
 *
 * Finish the initialization started by pa_sink_new(): derive the
 * volume-related flags, move the sink from INIT to IDLE and announce
 * it via subscription event and hook.  The implementor must have set
 * asyncmsgq and rtpoll (and any latency range) before calling. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);

    pa_assert(s->state == PA_SINK_INIT);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->rtpoll);
    pa_assert(!s->thread_info.min_latency || !s->thread_info.max_latency ||
              s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Without hardware volume control the volume is applied in
     * software, which can always be expressed in decibels. */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL)) {
        s->flags |= PA_SINK_DECIBEL_VOLUME;

        s->thread_info.soft_volume = s->soft_volume;
        s->thread_info.soft_muted = s->muted;
    }

    /* Decibel-capable sinks expose the full pa_volume_t resolution. */
    if (s->flags & PA_SINK_DECIBEL_VOLUME)
        s->n_volume_steps = PA_VOLUME_NORM+1;

    /* Flat volume only makes sense when volumes compose in dB. */
    if (s->core->flat_volumes)
        if (s->flags & PA_SINK_DECIBEL_VOLUME)
            s->flags |= PA_SINK_FLAT_VOLUME;

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
356
357 /* Called from main context */
/* Called from main context.
 *
 * Detach the sink from the core: unregister its name, remove it from
 * the core and card index sets, kill all connected inputs, move the
 * state to UNLINKED, drop implementor callbacks and unlink the monitor
 * source.  Announcements are fired only if the sink had actually been
 * linked. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Kill every remaining input; the 'j' check guards against an
     * input that refuses to leave the set (would loop forever). */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
405
406 /* Called from main context */
/* Called from main context.
 *
 * Destructor invoked when the last reference is dropped (installed as
 * parent.parent.free in pa_sink_new()).  Unlinks the sink if that has
 * not happened yet, then releases all owned resources. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);
    pa_sink_input *i;

    pa_assert(s);
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    /* Main-thread input set holds no references of its own. */
    pa_idxset_free(s->inputs, NULL, NULL);

    /* The IO-thread map does hold references; drop them one by one. */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    pa_xfree(s);
}
442
443 /* Called from main context */
444 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
445 pa_sink_assert_ref(s);
446
447 s->asyncmsgq = q;
448
449 if (s->monitor_source)
450 pa_source_set_asyncmsgq(s->monitor_source, q);
451 }
452
453 /* Called from main context */
454 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
455 pa_sink_assert_ref(s);
456
457 s->rtpoll = p;
458 if (s->monitor_source)
459 pa_source_set_rtpoll(s->monitor_source, p);
460 }
461
462 /* Called from main context */
463 int pa_sink_update_status(pa_sink*s) {
464 pa_sink_assert_ref(s);
465 pa_assert(PA_SINK_IS_LINKED(s->state));
466
467 if (s->state == PA_SINK_SUSPENDED)
468 return 0;
469
470 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
471 }
472
473 /* Called from main context */
474 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend) {
475 pa_sink_assert_ref(s);
476 pa_assert(PA_SINK_IS_LINKED(s->state));
477
478 if (suspend)
479 return sink_set_state(s, PA_SINK_SUSPENDED);
480 else
481 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
482 }
483
484 /* Called from main context */
/* Called from main context.
 *
 * Begin moving every input away from this sink.  Each input that
 * agrees to move (pa_sink_input_start_move() succeeds) is referenced
 * and pushed onto the returned queue; pass that queue later to
 * pa_sink_move_all_finish() or pa_sink_move_all_fail().  The caller
 * owns the returned queue. */
pa_queue *pa_sink_move_all_start(pa_sink *s) {
    pa_queue *q;
    pa_sink_input *i, *n;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->state));

    q = pa_queue_new();

    /* Fetch the next entry before start_move() can mutate the set. */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
        n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));

        if (pa_sink_input_start_move(i) >= 0)
            pa_queue_push(q, pa_sink_input_ref(i));
    }

    return q;
}
504
505 /* Called from main context */
506 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q) {
507 pa_sink_input *i;
508
509 pa_sink_assert_ref(s);
510 pa_assert(PA_SINK_IS_LINKED(s->state));
511 pa_assert(q);
512
513 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
514 if (pa_sink_input_finish_move(i, s) < 0)
515 pa_sink_input_unlink(i);
516
517 pa_sink_input_unref(i);
518 }
519
520 pa_queue_free(q, NULL, NULL);
521 }
522
523 /* Called from main context */
524 void pa_sink_move_all_fail(pa_queue *q) {
525 pa_sink_input *i;
526 pa_assert(q);
527
528 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
529 pa_sink_input_unlink(i);
530 pa_sink_input_unref(i);
531 }
532
533 pa_queue_free(q, NULL, NULL);
534 }
535
536 /* Called from IO thread context */
/* Called from IO thread context.
 *
 * Execute a previously requested rewind of 'nbytes' bytes: clear the
 * pending-rewind bookkeeping, forward the rewind to every connected
 * input and finally to the monitor source.  A zero 'nbytes' still
 * notifies the inputs (with a no-op rewind) but skips the monitor. */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
    pa_sink_input *i;
    void *state = NULL;
    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;

    if (nbytes > 0)
        pa_log_debug("Processing rewind...");

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_process_rewind(i, nbytes);
    }

    if (nbytes > 0)
        if (s->monitor_source && PA_SOURCE_IS_OPENED(s->monitor_source->thread_info.state))
            pa_source_process_rewind(s->monitor_source, nbytes);
}
558
559 /* Called from IO thread context */
/* Called from IO thread context.
 *
 * Peek one chunk per connected input (up to 'maxinfo') into 'info'.
 * On entry *length is the requested byte count; on return it is
 * shrunk to the shortest chunk obtained, so all entries can be mixed
 * over a common span.  Each filled entry holds a reference on the
 * input (info->userdata) and on the memblock; inputs_drop() releases
 * both.  Chunks that are pure silence are dropped early (they still
 * bound the common length).  Returns the number of entries filled. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        /* Input has no data right now: skip it for this round. */
        if (pa_sink_input_peek(i, *length, &info->chunk, &info->volume) < 0)
            continue;

        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Silence contributes nothing to the mix; drop it early. */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        /* Keep the input alive until inputs_drop() runs. */
        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
598
599 /* Called from IO thread context */
/* Called from IO thread context.
 *
 * After mixing: advance every connected input by result->length bytes,
 * feed direct outputs and the monitor source, and release the
 * references (input + memblock) that fill_mix_info() took.  'info'/'n'
 * is the array fill_mix_info() produced; 'result' is the mixed chunk
 * about to be played. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state = NULL;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array */
        /* ('p' persists across iterations so a stable input order makes
         * this an O(1) lookup instead of a scan.) */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_OPENED(pa_source_get_state(s->monitor_source))) {

            /* Inputs with direct outputs get their own (volume-scaled)
             * copy of what they contributed, or silence if they
             * contributed nothing this round. */
            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                if (m && m->chunk.memblock) {
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release the references fill_mix_info() took for this entry. */
        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_OPENED(pa_source_get_state(s->monitor_source)))
        pa_source_post(s->monitor_source, result);
}
694
695 /* Called from IO thread context */
/* Called from IO thread context.
 *
 * Render up to 'length' bytes of mixed audio into *result, allocating
 * (or reference-sharing) the memblock as needed.  'length' == 0 asks
 * for a default-sized chunk.  Fast paths: zero inputs yields the
 * cached silence block, one input may share the input's block
 * directly; otherwise a real mix is performed. */
void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t block_size_max;

    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_OPENED(s->thread_info.state));
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    /* NOTE(review): length is size_t, so "<= 0" is just "== 0". */
    if (length <= 0)
        length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);

    /* Never ask for more than one mempool block can hold. */
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {

        /* No inputs: hand out the shared silence block. */
        *result = s->silence;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

    } else if (n == 1) {
        pa_cvolume volume;

        /* Single input: share its block, scaling only if needed. */
        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
            pa_memchunk_make_writable(result, 0);
            if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
                pa_silence_memchunk(result, &s->sample_spec);
            else
                pa_volume_memchunk(result, &s->sample_spec, &volume);
        }
    } else {
        void *ptr;

        /* General case: mix all inputs into a fresh block. */
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);
        result->length = pa_mix(info, n,
                                ptr, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);
        pa_memblock_release(result->memblock);

        result->index = 0;
    }

    inputs_drop(s, info, n, result);

    pa_sink_unref(s);
}
767
768 /* Called from IO thread context */
/* Called from IO thread context.
 *
 * Render mixed audio directly into the caller-supplied chunk
 * *target, shrinking target->length if fewer bytes are available.
 * Unlike pa_sink_render() no block sharing is possible, so data is
 * silenced, copied or mixed in place. */
void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t length, block_size_max;

    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_OPENED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    /* Never ask for more than one mempool block can hold. */
    length = target->length;
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* No inputs: write silence. */
        if (target->length > length)
            target->length = length;

        pa_silence_memchunk(target, &s->sample_spec);
    } else if (n == 1) {
        pa_cvolume volume;

        /* Single input: copy its data, scaling first if needed. */
        if (target->length > length)
            target->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
            pa_silence_memchunk(target, &s->sample_spec);
        else {
            pa_memchunk vchunk;

            vchunk = info[0].chunk;
            pa_memblock_ref(vchunk.memblock);

            if (vchunk.length > length)
                vchunk.length = length;

            if (!pa_cvolume_is_norm(&volume)) {
                pa_memchunk_make_writable(&vchunk, 0);
                pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
            }

            pa_memchunk_memcpy(target, &vchunk);
            pa_memblock_unref(vchunk.memblock);
        }

    } else {
        void *ptr;

        /* General case: mix all inputs straight into the target. */
        ptr = pa_memblock_acquire(target->memblock);

        target->length = pa_mix(info, n,
                                (uint8_t*) ptr + target->index, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(target->memblock);
    }

    inputs_drop(s, info, n, target);

    pa_sink_unref(s);
}
846
847 /* Called from IO thread context */
848 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
849 pa_memchunk chunk;
850 size_t l, d;
851
852 pa_sink_assert_ref(s);
853 pa_assert(PA_SINK_IS_OPENED(s->thread_info.state));
854 pa_assert(target);
855 pa_assert(target->memblock);
856 pa_assert(target->length > 0);
857 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
858
859 pa_sink_ref(s);
860
861 pa_assert(!s->thread_info.rewind_requested);
862 pa_assert(s->thread_info.rewind_nbytes == 0);
863
864 l = target->length;
865 d = 0;
866 while (l > 0) {
867 chunk = *target;
868 chunk.index += d;
869 chunk.length -= d;
870
871 pa_sink_render_into(s, &chunk);
872
873 d += chunk.length;
874 l -= chunk.length;
875 }
876
877 pa_sink_unref(s);
878 }
879
880 /* Called from IO thread context */
881 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
882 pa_sink_assert_ref(s);
883 pa_assert(PA_SINK_IS_OPENED(s->thread_info.state));
884 pa_assert(length > 0);
885 pa_assert(pa_frame_aligned(length, &s->sample_spec));
886 pa_assert(result);
887
888 pa_assert(!s->thread_info.rewind_requested);
889 pa_assert(s->thread_info.rewind_nbytes == 0);
890
891 /*** This needs optimization ***/
892
893 result->index = 0;
894 result->length = length;
895 result->memblock = pa_memblock_new(s->core->mempool, length);
896
897 pa_sink_render_into_full(s, result);
898 }
899
900 /* Called from main thread */
901 pa_usec_t pa_sink_get_latency(pa_sink *s) {
902 pa_usec_t usec = 0;
903
904 pa_sink_assert_ref(s);
905 pa_assert(PA_SINK_IS_LINKED(s->state));
906
907 /* The returned value is supposed to be in the time domain of the sound card! */
908
909 if (!PA_SINK_IS_OPENED(s->state))
910 return 0;
911
912 if (!(s->flags & PA_SINK_LATENCY))
913 return 0;
914
915 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
916
917 return usec;
918 }
919
920 /* Called from main thread */
921 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
922 pa_sink_input *i;
923 uint32_t idx;
924
925 /* This is called whenever a sink input volume changes and we
926 * might need to fix up the sink volume accordingly. Please note
927 * that we don't actually update the sinks volume here, we only
928 * return how it needs to be updated. The caller should then call
929 * pa_sink_set_flat_volume().*/
930
931 pa_cvolume_mute(new_volume, s->channel_map.channels);
932
933 /* First let's determine the new maximum volume of all inputs
934 * connected to this sink */
935 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
936 unsigned c;
937 pa_cvolume remapped_volume;
938
939 remapped_volume = i->virtual_volume;
940 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
941
942 for (c = 0; c < new_volume->channels; c++)
943 if (remapped_volume.values[c] > new_volume->values[c])
944 new_volume->values[c] = remapped_volume.values[c];
945 }
946
947 /* Then, let's update the soft volumes of all inputs connected
948 * to this sink */
949 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
950 pa_cvolume remapped_new_volume;
951
952 remapped_new_volume = *new_volume;
953 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
954 pa_sw_cvolume_divide(&i->soft_volume, &i->virtual_volume, &remapped_new_volume);
955
956 /* Hooks have the ability to play games with i->soft_volume */
957 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);
958
959 /* We don't issue PA_SINK_INPUT_MESSAGE_SET_VOLUME because
960 * we want the update to have atomically with the sink
961 * volume update, hence we do it within the
962 * pa_sink_set_flat_volume() call below*/
963 }
964 }
965
966 /* Called from main thread */
/* Called from main thread.
 *
 * After the sink's virtual volume changed for a reason other than an
 * input volume change, rescale every input's virtual volume by the
 * ratio new/old (per channel, computed in 64-bit to avoid overflow,
 * with an old volume of MUTED mapping to MUTED) and post change
 * events for inputs that actually changed. */
void pa_sink_propagate_flat_volume(pa_sink *s, const pa_cvolume *old_volume) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(old_volume);
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    /* This is called whenever the sink volume changes that is not
     * caused by a sink input volume change. We need to fix up the
     * sink input volumes accordingly */

    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
        pa_cvolume remapped_old_volume, remapped_new_volume, fixed_volume;
        unsigned c;

        /* Map both old and new sink volume into the input's channels. */
        remapped_new_volume = s->virtual_volume;
        pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);

        remapped_old_volume = *old_volume;
        pa_cvolume_remap(&remapped_old_volume, &s->channel_map, &i->channel_map);

        for (c = 0; c < i->sample_spec.channels; c++)

            if (remapped_old_volume.values[c] == PA_VOLUME_MUTED)
                fixed_volume.values[c] = PA_VOLUME_MUTED;
            else
                fixed_volume.values[c] = (pa_volume_t)
                    ((uint64_t) i->virtual_volume.values[c] *
                     (uint64_t) remapped_new_volume.values[c] /
                     (uint64_t) remapped_old_volume.values[c]);

        fixed_volume.channels = i->virtual_volume.channels;

        if (!pa_cvolume_equal(&fixed_volume, &i->virtual_volume)) {
            i->virtual_volume = fixed_volume;

            /* The virtual volume changed, let's tell people so */
            pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
        }
    }
}
1010
1011 /* Called from main thread */
/* Called from main thread.
 *
 * Set the sink's virtual volume.  If 'propagate' is set and the sink
 * uses flat volume, the change is propagated back to the inputs.  The
 * implementor's set_volume() callback (if any) applies the hardware
 * part; otherwise the full volume becomes the software volume.  If
 * 'sendmsg' is set the IO thread is informed synchronously. */
void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg) {
    pa_cvolume old_virtual_volume;
    pa_bool_t virtual_volume_changed;

    pa_sink_assert_ref(s);
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(volume);
    pa_assert(pa_cvolume_valid(volume));
    pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));

    old_virtual_volume = s->virtual_volume;
    s->virtual_volume = *volume;
    virtual_volume_changed = !pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume);

    /* Propagate this volume change back to the inputs */
    if (virtual_volume_changed)
        if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
            pa_sink_propagate_flat_volume(s, &old_virtual_volume);

    if (s->set_volume) {
        /* If we have a set_volume() callback, we do not apply a soft
         * volume by default; set_volume() may however install one in
         * s->soft_volume itself. */

        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
        s->set_volume(s);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the virtual volume */
        s->soft_volume = s->virtual_volume;

    /* This tells the sink that soft and/or virtual volume changed */
    if (sendmsg)
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);

    if (virtual_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1051
1052 /* Called from main thread. Only to be called by sink implementor */
1053 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1054 pa_sink_assert_ref(s);
1055 pa_assert(volume);
1056
1057 s->soft_volume = *volume;
1058
1059 if (PA_SINK_IS_LINKED(s->state))
1060 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1061 else
1062 s->thread_info.soft_volume = *volume;
1063 }
1064
1065 /* Called from main thread */
/* Called from main thread.
 *
 * Return the sink's virtual volume, optionally refreshing it from the
 * hardware first (implementor get_volume() callback plus a synchronous
 * GET_VOLUME message).  If the refresh reveals a change, flat-volume
 * propagation and a change event are triggered. */
const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
    pa_sink_assert_ref(s);

    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_virtual_volume = s->virtual_volume;

        if (s->get_volume)
            s->get_volume(s);

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {

            if (s->flags & PA_SINK_FLAT_VOLUME)
                pa_sink_propagate_flat_volume(s, &old_virtual_volume);

            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
        }
    }

    return &s->virtual_volume;
}
1088
1089 /* Called from main thread */
1090 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute) {
1091 pa_bool_t old_muted;
1092
1093 pa_sink_assert_ref(s);
1094 pa_assert(PA_SINK_IS_LINKED(s->state));
1095
1096 old_muted = s->muted;
1097 s->muted = mute;
1098
1099 if (s->set_mute)
1100 s->set_mute(s);
1101
1102 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1103
1104 if (old_muted != s->muted)
1105 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1106 }
1107
1108 /* Called from main thread */
1109 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1110
1111 pa_sink_assert_ref(s);
1112
1113 if (s->refresh_muted || force_refresh) {
1114 pa_bool_t old_muted = s->muted;
1115
1116 if (s->get_mute)
1117 s->get_mute(s);
1118
1119 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1120
1121 if (old_muted != s->muted)
1122 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1123 }
1124
1125 return s->muted;
1126 }
1127
/* Called from main thread */
pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {

    pa_sink_assert_ref(s);

    /* Merge the passed property list into the sink's own, honouring
     * the requested update mode. */
    pa_proplist_update(s->proplist, mode, p);

    /* Only notify hooks/subscribers once the sink is actually linked
     * into the core; before that nobody can be listening yet. */
    if (PA_SINK_IS_LINKED(s->state)) {
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    /* Always reports success. */
    return TRUE;
}
1142
1143 /* Called from main thread */
1144 void pa_sink_set_description(pa_sink *s, const char *description) {
1145 const char *old;
1146 pa_sink_assert_ref(s);
1147
1148 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1149 return;
1150
1151 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1152
1153 if (old && description && !strcmp(old, description))
1154 return;
1155
1156 if (description)
1157 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1158 else
1159 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1160
1161 if (s->monitor_source) {
1162 char *n;
1163
1164 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1165 pa_source_set_description(s->monitor_source, n);
1166 pa_xfree(n);
1167 }
1168
1169 if (PA_SINK_IS_LINKED(s->state)) {
1170 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1171 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1172 }
1173 }
1174
1175 /* Called from main thread */
1176 unsigned pa_sink_linked_by(pa_sink *s) {
1177 unsigned ret;
1178
1179 pa_sink_assert_ref(s);
1180 pa_assert(PA_SINK_IS_LINKED(s->state));
1181
1182 ret = pa_idxset_size(s->inputs);
1183
1184 /* We add in the number of streams connected to us here. Please
1185 * note the asymmmetry to pa_sink_used_by()! */
1186
1187 if (s->monitor_source)
1188 ret += pa_source_linked_by(s->monitor_source);
1189
1190 return ret;
1191 }
1192
1193 /* Called from main thread */
1194 unsigned pa_sink_used_by(pa_sink *s) {
1195 unsigned ret;
1196
1197 pa_sink_assert_ref(s);
1198 pa_assert(PA_SINK_IS_LINKED(s->state));
1199
1200 ret = pa_idxset_size(s->inputs);
1201 pa_assert(ret >= s->n_corked);
1202
1203 /* Streams connected to our monitor source do not matter for
1204 * pa_sink_used_by()!.*/
1205
1206 return ret - s->n_corked;
1207 }
1208
1209 /* Called from main thread */
1210 unsigned pa_sink_check_suspend(pa_sink *s) {
1211 unsigned ret;
1212 pa_sink_input *i;
1213 uint32_t idx;
1214
1215 pa_sink_assert_ref(s);
1216
1217 if (!PA_SINK_IS_LINKED(s->state))
1218 return 0;
1219
1220 ret = 0;
1221
1222 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1223 pa_sink_input_state_t st;
1224
1225 st = pa_sink_input_get_state(i);
1226 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1227
1228 if (st == PA_SINK_INPUT_CORKED)
1229 continue;
1230
1231 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1232 continue;
1233
1234 ret ++;
1235 }
1236
1237 if (s->monitor_source)
1238 ret += pa_source_check_suspend(s->monitor_source);
1239
1240 return ret;
1241 }
1242
1243 /* Called from the IO thread */
1244 static void sync_input_volumes_within_thread(pa_sink *s) {
1245 pa_sink_input *i;
1246 void *state = NULL;
1247
1248 pa_sink_assert_ref(s);
1249
1250 while ((i = PA_SINK_INPUT(pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))) {
1251 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1252 continue;
1253
1254 i->thread_info.soft_volume = i->soft_volume;
1255 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1256 }
1257 }
1258
1259 /* Called from IO thread, except when it is not */
1260 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1261 pa_sink *s = PA_SINK(o);
1262 pa_sink_assert_ref(s);
1263
1264 switch ((pa_sink_message_t) code) {
1265
1266 case PA_SINK_MESSAGE_ADD_INPUT: {
1267 pa_sink_input *i = PA_SINK_INPUT(userdata);
1268
1269 /* If you change anything here, make sure to change the
1270 * sink input handling a few lines down at
1271 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1272
1273 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1274
1275 /* Since the caller sleeps in pa_sink_input_put(), we can
1276 * safely access data outside of thread_info even though
1277 * it is mutable */
1278
1279 if ((i->thread_info.sync_prev = i->sync_prev)) {
1280 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1281 pa_assert(i->sync_prev->sync_next == i);
1282 i->thread_info.sync_prev->thread_info.sync_next = i;
1283 }
1284
1285 if ((i->thread_info.sync_next = i->sync_next)) {
1286 pa_assert(i->sink == i->thread_info.sync_next->sink);
1287 pa_assert(i->sync_next->sync_prev == i);
1288 i->thread_info.sync_next->thread_info.sync_prev = i;
1289 }
1290
1291 pa_assert(!i->thread_info.attached);
1292 i->thread_info.attached = TRUE;
1293
1294 if (i->attach)
1295 i->attach(i);
1296
1297 pa_sink_input_set_state_within_thread(i, i->state);
1298
1299 /* The requested latency of the sink input needs to be
1300 * fixed up and then configured on the sink */
1301
1302 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1303 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1304
1305 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1306 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1307
1308 /* We don't rewind here automatically. This is left to the
1309 * sink input implementor because some sink inputs need a
1310 * slow start, i.e. need some time to buffer client
1311 * samples before beginning streaming. */
1312
1313 /* In flat volume mode we need to update the volume as
1314 * well */
1315 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1316 }
1317
1318 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1319 pa_sink_input *i = PA_SINK_INPUT(userdata);
1320
1321 /* If you change anything here, make sure to change the
1322 * sink input handling a few lines down at
1323 * PA_SINK_MESSAGE_PREPAPRE_MOVE, too. */
1324
1325 if (i->detach)
1326 i->detach(i);
1327
1328 pa_sink_input_set_state_within_thread(i, i->state);
1329
1330 pa_assert(i->thread_info.attached);
1331 i->thread_info.attached = FALSE;
1332
1333 /* Since the caller sleeps in pa_sink_input_unlink(),
1334 * we can safely access data outside of thread_info even
1335 * though it is mutable */
1336
1337 pa_assert(!i->sync_prev);
1338 pa_assert(!i->sync_next);
1339
1340 if (i->thread_info.sync_prev) {
1341 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1342 i->thread_info.sync_prev = NULL;
1343 }
1344
1345 if (i->thread_info.sync_next) {
1346 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1347 i->thread_info.sync_next = NULL;
1348 }
1349
1350 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1351 pa_sink_input_unref(i);
1352
1353 pa_sink_invalidate_requested_latency(s);
1354 pa_sink_request_rewind(s, (size_t) -1);
1355
1356 /* In flat volume mode we need to update the volume as
1357 * well */
1358 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1359 }
1360
1361 case PA_SINK_MESSAGE_START_MOVE: {
1362 pa_sink_input *i = PA_SINK_INPUT(userdata);
1363
1364 /* We don't support moving synchronized streams. */
1365 pa_assert(!i->sync_prev);
1366 pa_assert(!i->sync_next);
1367 pa_assert(!i->thread_info.sync_next);
1368 pa_assert(!i->thread_info.sync_prev);
1369
1370 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1371 pa_usec_t usec = 0;
1372 size_t sink_nbytes, total_nbytes;
1373
1374 /* Get the latency of the sink */
1375 if (!(s->flags & PA_SINK_LATENCY) ||
1376 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1377 usec = 0;
1378
1379 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1380 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1381
1382 if (total_nbytes > 0) {
1383 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1384 i->thread_info.rewrite_flush = TRUE;
1385 pa_sink_input_process_rewind(i, sink_nbytes);
1386 }
1387 }
1388
1389 if (i->detach)
1390 i->detach(i);
1391
1392 pa_assert(i->thread_info.attached);
1393 i->thread_info.attached = FALSE;
1394
1395 /* Let's remove the sink input ...*/
1396 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1397 pa_sink_input_unref(i);
1398
1399 pa_sink_invalidate_requested_latency(s);
1400
1401 pa_log_debug("Requesting rewind due to started move");
1402 pa_sink_request_rewind(s, (size_t) -1);
1403
1404 /* In flat volume mode we need to update the volume as
1405 * well */
1406 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1407 }
1408
1409 case PA_SINK_MESSAGE_FINISH_MOVE: {
1410 pa_sink_input *i = PA_SINK_INPUT(userdata);
1411
1412 /* We don't support moving synchronized streams. */
1413 pa_assert(!i->sync_prev);
1414 pa_assert(!i->sync_next);
1415 pa_assert(!i->thread_info.sync_next);
1416 pa_assert(!i->thread_info.sync_prev);
1417
1418 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1419
1420 pa_assert(!i->thread_info.attached);
1421 i->thread_info.attached = TRUE;
1422
1423 if (i->attach)
1424 i->attach(i);
1425
1426 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1427 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1428
1429 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1430 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1431
1432 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1433 pa_usec_t usec = 0;
1434 size_t nbytes;
1435
1436 /* Get the latency of the sink */
1437 if (!(s->flags & PA_SINK_LATENCY) ||
1438 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1439 usec = 0;
1440
1441 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1442
1443 if (nbytes > 0)
1444 pa_sink_input_drop(i, nbytes);
1445
1446 pa_log_debug("Requesting rewind due to finished move");
1447 pa_sink_request_rewind(s, nbytes);
1448 }
1449
1450 /* In flat volume mode we need to update the volume as
1451 * well */
1452 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1453 }
1454
1455 case PA_SINK_MESSAGE_SET_VOLUME:
1456
1457 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1458 s->thread_info.soft_volume = s->soft_volume;
1459 pa_sink_request_rewind(s, (size_t) -1);
1460 }
1461
1462 if (s->flags & PA_SINK_FLAT_VOLUME)
1463 sync_input_volumes_within_thread(s);
1464
1465 return 0;
1466
1467 case PA_SINK_MESSAGE_GET_VOLUME:
1468 return 0;
1469
1470 case PA_SINK_MESSAGE_SET_MUTE:
1471
1472 if (!s->thread_info.soft_muted != s->muted) {
1473 s->thread_info.soft_muted = s->muted;
1474 pa_sink_request_rewind(s, (size_t) -1);
1475 }
1476
1477 return 0;
1478
1479 case PA_SINK_MESSAGE_GET_MUTE:
1480 return 0;
1481
1482 case PA_SINK_MESSAGE_SET_STATE:
1483
1484 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1485 return 0;
1486
1487 case PA_SINK_MESSAGE_DETACH:
1488
1489 /* Detach all streams */
1490 pa_sink_detach_within_thread(s);
1491 return 0;
1492
1493 case PA_SINK_MESSAGE_ATTACH:
1494
1495 /* Reattach all streams */
1496 pa_sink_attach_within_thread(s);
1497 return 0;
1498
1499 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
1500
1501 pa_usec_t *usec = userdata;
1502 *usec = pa_sink_get_requested_latency_within_thread(s);
1503
1504 if (*usec == (pa_usec_t) -1)
1505 *usec = s->thread_info.max_latency;
1506
1507 return 0;
1508 }
1509
1510 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
1511 pa_usec_t *r = userdata;
1512
1513 pa_sink_update_latency_range(s, r[0], r[1]);
1514
1515 return 0;
1516 }
1517
1518 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
1519 pa_usec_t *r = userdata;
1520
1521 r[0] = s->thread_info.min_latency;
1522 r[1] = s->thread_info.max_latency;
1523
1524 return 0;
1525 }
1526
1527 case PA_SINK_MESSAGE_GET_MAX_REWIND:
1528
1529 *((size_t*) userdata) = s->thread_info.max_rewind;
1530 return 0;
1531
1532 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
1533
1534 *((size_t*) userdata) = s->thread_info.max_request;
1535 return 0;
1536
1537 case PA_SINK_MESSAGE_GET_LATENCY:
1538 case PA_SINK_MESSAGE_MAX:
1539 ;
1540 }
1541
1542 return -1;
1543 }
1544
1545 /* Called from main thread */
1546 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend) {
1547 pa_sink *sink;
1548 uint32_t idx;
1549 int ret = 0;
1550
1551 pa_core_assert_ref(c);
1552
1553 for (sink = PA_SINK(pa_idxset_first(c->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(c->sinks, &idx)))
1554 ret -= pa_sink_suspend(sink, suspend) < 0;
1555
1556 return ret;
1557 }
1558
1559 /* Called from main thread */
1560 void pa_sink_detach(pa_sink *s) {
1561 pa_sink_assert_ref(s);
1562 pa_assert(PA_SINK_IS_LINKED(s->state));
1563
1564 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
1565 }
1566
1567 /* Called from main thread */
1568 void pa_sink_attach(pa_sink *s) {
1569 pa_sink_assert_ref(s);
1570 pa_assert(PA_SINK_IS_LINKED(s->state));
1571
1572 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
1573 }
1574
1575 /* Called from IO thread */
1576 void pa_sink_detach_within_thread(pa_sink *s) {
1577 pa_sink_input *i;
1578 void *state = NULL;
1579
1580 pa_sink_assert_ref(s);
1581 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1582
1583 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1584 if (i->detach)
1585 i->detach(i);
1586
1587 if (s->monitor_source)
1588 pa_source_detach_within_thread(s->monitor_source);
1589 }
1590
1591 /* Called from IO thread */
1592 void pa_sink_attach_within_thread(pa_sink *s) {
1593 pa_sink_input *i;
1594 void *state = NULL;
1595
1596 pa_sink_assert_ref(s);
1597 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1598
1599 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1600 if (i->attach)
1601 i->attach(i);
1602
1603 if (s->monitor_source)
1604 pa_source_attach_within_thread(s->monitor_source);
1605 }
1606
1607 /* Called from IO thread */
1608 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
1609 pa_sink_assert_ref(s);
1610 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1611
1612 if (nbytes == (size_t) -1)
1613 nbytes = s->thread_info.max_rewind;
1614
1615 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
1616
1617 if (s->thread_info.rewind_requested &&
1618 nbytes <= s->thread_info.rewind_nbytes)
1619 return;
1620
1621 s->thread_info.rewind_nbytes = nbytes;
1622 s->thread_info.rewind_requested = TRUE;
1623
1624 if (s->request_rewind)
1625 s->request_rewind(s);
1626 }
1627
1628 /* Called from IO thread */
1629 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
1630 pa_usec_t result = (pa_usec_t) -1;
1631 pa_sink_input *i;
1632 void *state = NULL;
1633 pa_usec_t monitor_latency;
1634
1635 pa_sink_assert_ref(s);
1636
1637 if (s->thread_info.requested_latency_valid)
1638 return s->thread_info.requested_latency;
1639
1640 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1641
1642 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
1643 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
1644 result = i->thread_info.requested_sink_latency;
1645
1646 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
1647
1648 if (monitor_latency != (pa_usec_t) -1 &&
1649 (result == (pa_usec_t) -1 || result > monitor_latency))
1650 result = monitor_latency;
1651
1652 if (result != (pa_usec_t) -1) {
1653 if (s->thread_info.max_latency > 0 && result > s->thread_info.max_latency)
1654 result = s->thread_info.max_latency;
1655
1656 if (s->thread_info.min_latency > 0 && result < s->thread_info.min_latency)
1657 result = s->thread_info.min_latency;
1658 }
1659
1660 s->thread_info.requested_latency = result;
1661 s->thread_info.requested_latency_valid = TRUE;
1662
1663 return result;
1664 }
1665
1666 /* Called from main thread */
1667 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
1668 pa_usec_t usec = 0;
1669
1670 pa_sink_assert_ref(s);
1671 pa_assert(PA_SINK_IS_LINKED(s->state));
1672
1673 if (!PA_SINK_IS_OPENED(s->state))
1674 return 0;
1675
1676 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
1677 return usec;
1678 }
1679
1680 /* Called from IO thread */
1681 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
1682 pa_sink_input *i;
1683 void *state = NULL;
1684
1685 pa_sink_assert_ref(s);
1686
1687 if (max_rewind == s->thread_info.max_rewind)
1688 return;
1689
1690 s->thread_info.max_rewind = max_rewind;
1691
1692 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1693 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1694 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1695 }
1696
1697 if (s->monitor_source)
1698 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
1699 }
1700
1701 /* Called from IO thread */
1702 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
1703 void *state = NULL;
1704
1705 pa_sink_assert_ref(s);
1706
1707 if (max_request == s->thread_info.max_request)
1708 return;
1709
1710 s->thread_info.max_request = max_request;
1711
1712 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1713 pa_sink_input *i;
1714
1715 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1716 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1717 }
1718 }
1719
1720 /* Called from IO thread */
1721 void pa_sink_invalidate_requested_latency(pa_sink *s) {
1722 pa_sink_input *i;
1723 void *state = NULL;
1724
1725 pa_sink_assert_ref(s);
1726
1727 s->thread_info.requested_latency_valid = FALSE;
1728
1729 if (s->update_requested_latency)
1730 s->update_requested_latency(s);
1731
1732 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1733 if (i->update_sink_requested_latency)
1734 i->update_sink_requested_latency(i);
1735 }
1736
/* Called from main thread. Configures the sink's supported latency
 * range, either via the IO thread (when linked) or directly. */
void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);

    /* min_latency == 0: no limit
     * min_latency == (size_t) -1: default limit
     * min_latency anything else: specified limit
     *
     * Similar for max_latency */

    if (min_latency == (pa_usec_t) -1)
        min_latency = DEFAULT_MIN_LATENCY;

    /* Defaulted max collapses the range onto min. */
    if (max_latency == (pa_usec_t) -1)
        max_latency = min_latency;

    pa_assert(!min_latency || !max_latency ||
              min_latency <= max_latency);

    if (PA_SINK_IS_LINKED(s->state)) {
        pa_usec_t r[2];

        r[0] = min_latency;
        r[1] = max_latency;

        /* The thread copies must only be touched from the IO thread
         * once linked, so route the update through a message. */
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
    } else {
        /* Not linked yet: no IO thread is running, write directly. */
        s->thread_info.min_latency = min_latency;
        s->thread_info.max_latency = max_latency;

        /* NOTE(review): s->monitor_source is dereferenced here without
         * a NULL check, unlike elsewhere in this file -- presumably the
         * monitor source always exists at this point; confirm against
         * pa_sink_new()/callers. */
        s->monitor_source->thread_info.min_latency = min_latency;
        s->monitor_source->thread_info.max_latency = max_latency;

        /* The cached requested latencies are stale now. */
        s->thread_info.requested_latency_valid = s->monitor_source->thread_info.requested_latency_valid = FALSE;
    }
}
1773
1774 /* Called from main thread */
1775 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
1776 pa_sink_assert_ref(s);
1777 pa_assert(min_latency);
1778 pa_assert(max_latency);
1779
1780 if (PA_SINK_IS_LINKED(s->state)) {
1781 pa_usec_t r[2] = { 0, 0 };
1782
1783 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
1784
1785 *min_latency = r[0];
1786 *max_latency = r[1];
1787 } else {
1788 *min_latency = s->thread_info.min_latency;
1789 *max_latency = s->thread_info.max_latency;
1790 }
1791 }
1792
/* Called from IO thread. Applies a new latency range to the thread-side
 * state, notifies all inputs, invalidates the cached requested latency
 * and forwards the range to the monitor source. */
void pa_sink_update_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    /* Give every input a chance to react to the new range. */
    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
        if (i->update_sink_latency_range)
            i->update_sink_latency_range(i);

    /* The cached requested latency may now fall outside the range. */
    pa_sink_invalidate_requested_latency(s);

    /* NOTE(review): s->monitor_source is passed without a NULL check,
     * unlike the guarded uses elsewhere in this file -- presumably it
     * always exists here; confirm against sink construction. */
    pa_source_update_latency_range(s->monitor_source, min_latency, max_latency);
}
1811
1812 /* Called from main context */
1813 size_t pa_sink_get_max_rewind(pa_sink *s) {
1814 size_t r;
1815 pa_sink_assert_ref(s);
1816
1817 if (!PA_SINK_IS_LINKED(s->state))
1818 return s->thread_info.max_rewind;
1819
1820 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
1821
1822 return r;
1823 }
1824
1825 /* Called from main context */
1826 size_t pa_sink_get_max_request(pa_sink *s) {
1827 size_t r;
1828 pa_sink_assert_ref(s);
1829
1830 if (!PA_SINK_IS_LINKED(s->state))
1831 return s->thread_info.max_request;
1832
1833 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
1834
1835 return r;
1836 }