1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
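/* The latency limits above are in microseconds: a 500 usec absolute minimum,
 * a 10 s absolute maximum, and a 250 ms default fixed latency. */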
54
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 memset(data, 0, sizeof(*data));
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_done(pa_sink_new_data *data) {
104 pa_assert(data);
105
106 pa_xfree(data->name);
107 pa_proplist_free(data->proplist);
108 }
109
110 /* Called from main context */
111 static void reset_callbacks(pa_sink *s) {
112 pa_assert(s);
113
114 s->set_state = NULL;
115 s->get_volume = NULL;
116 s->set_volume = NULL;
117 s->get_mute = NULL;
118 s->set_mute = NULL;
119 s->request_rewind = NULL;
120 s->update_requested_latency = NULL;
121 }
122
123 /* Called from main context */
124 pa_sink* pa_sink_new(
125 pa_core *core,
126 pa_sink_new_data *data,
127 pa_sink_flags_t flags) {
128
129 pa_sink *s;
130 const char *name;
131 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
132 pa_source_new_data source_data;
133 const char *dn;
134 char *pt;
135
136 pa_assert(core);
137 pa_assert(data);
138 pa_assert(data->name);
139
140 s = pa_msgobject_new(pa_sink);
141
142 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
143 pa_xfree(s);
144 return NULL;
145 }
146
147 pa_sink_new_data_set_name(data, name);
148
149 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
150 pa_xfree(s);
151 pa_namereg_unregister(core, name);
152 return NULL;
153 }
154
155 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
156 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
157
158 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
159
160 if (!data->channel_map_is_set)
161 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
162
163 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
164 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
165
166 if (!data->volume_is_set)
167 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
168
169 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
170 pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);
171
172 if (!data->muted_is_set)
173 data->muted = FALSE;
174
175 if (data->card)
176 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
177
178 pa_device_init_description(data->proplist);
179 pa_device_init_icon(data->proplist, TRUE);
180
181 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
182 pa_xfree(s);
183 pa_namereg_unregister(core, name);
184 return NULL;
185 }
186
187 s->parent.parent.free = sink_free;
188 s->parent.process_msg = pa_sink_process_msg;
189
190 s->core = core;
191 s->state = PA_SINK_INIT;
192 s->flags = flags;
193 s->name = pa_xstrdup(name);
194 s->proplist = pa_proplist_copy(data->proplist);
195 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
196 s->module = data->module;
197 s->card = data->card;
198
199 s->sample_spec = data->sample_spec;
200 s->channel_map = data->channel_map;
201
202 s->inputs = pa_idxset_new(NULL, NULL);
203 s->n_corked = 0;
204
205 s->reference_volume = s->virtual_volume = data->volume;
206 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
207 s->base_volume = PA_VOLUME_NORM;
208 s->n_volume_steps = PA_VOLUME_NORM+1;
209 s->muted = data->muted;
210 s->refresh_volume = s->refresh_muted = FALSE;
211
212 s->fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
213
214 reset_callbacks(s);
215 s->userdata = NULL;
216
217 s->asyncmsgq = NULL;
218 s->rtpoll = NULL;
219
220 pa_silence_memchunk_get(
221 &core->silence_cache,
222 core->mempool,
223 &s->silence,
224 &s->sample_spec,
225 0);
226
227 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
228 s->thread_info.soft_volume = s->soft_volume;
229 s->thread_info.soft_muted = s->muted;
230 s->thread_info.state = s->state;
231 s->thread_info.rewind_nbytes = 0;
232 s->thread_info.rewind_requested = FALSE;
233 s->thread_info.max_rewind = 0;
234 s->thread_info.max_request = 0;
235 s->thread_info.requested_latency_valid = FALSE;
236 s->thread_info.requested_latency = 0;
237 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
238 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
239
240 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
241
242 if (s->card)
243 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
244
245 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
246 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
247 s->index,
248 s->name,
249 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
250 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
251 pt);
252 pa_xfree(pt);
253
254 pa_source_new_data_init(&source_data);
255 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
256 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
257 source_data.name = pa_sprintf_malloc("%s.monitor", name);
258 source_data.driver = data->driver;
259 source_data.module = data->module;
260 source_data.card = data->card;
261
262 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
263 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
264 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
265
266 s->monitor_source = pa_source_new(core, &source_data,
267 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
268 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
269
270 pa_source_new_data_done(&source_data);
271
272 if (!s->monitor_source) {
273 pa_sink_unlink(s);
274 pa_sink_unref(s);
275 return NULL;
276 }
277
278 s->monitor_source->monitor_of = s;
279
280 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
281 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
282
283 return s;
284 }
285
286 /* Called from main context */
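/* Change the sink state: first give the implementation's set_state()
 * callback a chance to prepare or refuse the change, then forward the new
 * state to the IO thread; if the IO thread fails, roll back by calling
 * set_state() again with the original state. */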
287 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
288 int ret;
289 pa_bool_t suspend_change;
290 pa_sink_state_t original_state;
291
292 pa_assert(s);
293
294 if (s->state == state)
295 return 0;
296
297 original_state = s->state;
298
299 suspend_change =
300 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
301 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
302
303 if (s->set_state)
304 if ((ret = s->set_state(s, state)) < 0)
305 return ret;
306
307 if (s->asyncmsgq)
308 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
309
310 if (s->set_state)
311 s->set_state(s, original_state);
312
313 return ret;
314 }
315
316 s->state = state;
317
318         if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
319 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
320 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
321 }
322
323 if (suspend_change) {
324 pa_sink_input *i;
325 uint32_t idx;
326
327 /* We're suspending or resuming, tell everyone about it */
328
329 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx)))
330 if (s->state == PA_SINK_SUSPENDED &&
331 (i->flags & PA_SINK_INPUT_FAIL_ON_SUSPEND))
332 pa_sink_input_kill(i);
333 else if (i->suspend)
334 i->suspend(i, state == PA_SINK_SUSPENDED);
335
336 if (s->monitor_source)
337 pa_source_sync_suspend(s->monitor_source);
338 }
339
340 return 0;
341 }
342
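/*
 * Typical life cycle as seen from a sink implementation (a sketch only;
 * the names "example_sink", ss, map, asyncmsgq and rtpoll are placeholders
 * and the flags differ from module to module):
 *
 *     pa_sink_new_data data;
 *     pa_sink *s;
 *
 *     pa_sink_new_data_init(&data);
 *     pa_sink_new_data_set_name(&data, "example_sink");
 *     pa_sink_new_data_set_sample_spec(&data, &ss);
 *     pa_sink_new_data_set_channel_map(&data, &map);
 *     s = pa_sink_new(core, &data, PA_SINK_LATENCY);
 *     pa_sink_new_data_done(&data);
 *
 *     pa_sink_set_asyncmsgq(s, asyncmsgq);
 *     pa_sink_set_rtpoll(s, rtpoll);
 *     pa_sink_put(s);
 *
 * pa_sink_put() below asserts that asyncmsgq, rtpoll and a consistent
 * latency range have been set up before it is called.
 */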
343 /* Called from main context */
344 void pa_sink_put(pa_sink* s) {
345 pa_sink_assert_ref(s);
346
347 pa_assert(s->state == PA_SINK_INIT);
348
349 /* The following fields must be initialized properly when calling _put() */
350 pa_assert(s->asyncmsgq);
351 pa_assert(s->rtpoll);
352 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
353
354 /* Generally, flags should be initialized via pa_sink_new(). As a
355 * special exception we allow volume related flags to be set
356 * between _new() and _put(). */
357
358 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
359 s->flags |= PA_SINK_DECIBEL_VOLUME;
360
361 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
362 s->flags |= PA_SINK_FLAT_VOLUME;
363
364 s->thread_info.soft_volume = s->soft_volume;
365 s->thread_info.soft_muted = s->muted;
366
367 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
368 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
369 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->fixed_latency != 0));
370 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
371 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
372
373 pa_assert(s->monitor_source->fixed_latency == s->fixed_latency);
374 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
375 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
376
377 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
378
379 pa_source_put(s->monitor_source);
380
381 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
382 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
383 }
384
385 /* Called from main context */
386 void pa_sink_unlink(pa_sink* s) {
387 pa_bool_t linked;
388 pa_sink_input *i, *j = NULL;
389
390 pa_assert(s);
391
392 /* Please note that pa_sink_unlink() does more than simply
393 * reversing pa_sink_put(). It also undoes the registrations
394 * already done in pa_sink_new()! */
395
396 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
397 * may be called multiple times on the same sink without bad
398 * effects. */
399
400 linked = PA_SINK_IS_LINKED(s->state);
401
402 if (linked)
403 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
404
405 if (s->state != PA_SINK_UNLINKED)
406 pa_namereg_unregister(s->core, s->name);
407 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
408
409 if (s->card)
410 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
411
412 while ((i = pa_idxset_first(s->inputs, NULL))) {
413 pa_assert(i != j);
414 pa_sink_input_kill(i);
415 j = i;
416 }
417
418 if (linked)
419 sink_set_state(s, PA_SINK_UNLINKED);
420 else
421 s->state = PA_SINK_UNLINKED;
422
423 reset_callbacks(s);
424
425 if (s->monitor_source)
426 pa_source_unlink(s->monitor_source);
427
428 if (linked) {
429 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
430 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
431 }
432 }
433
434 /* Called from main context */
435 static void sink_free(pa_object *o) {
436 pa_sink *s = PA_SINK(o);
437 pa_sink_input *i;
438
439 pa_assert(s);
440 pa_assert(pa_sink_refcnt(s) == 0);
441
442 if (PA_SINK_IS_LINKED(s->state))
443 pa_sink_unlink(s);
444
445 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
446
447 if (s->monitor_source) {
448 pa_source_unref(s->monitor_source);
449 s->monitor_source = NULL;
450 }
451
452 pa_idxset_free(s->inputs, NULL, NULL);
453
454 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
455 pa_sink_input_unref(i);
456
457 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
458
459 if (s->silence.memblock)
460 pa_memblock_unref(s->silence.memblock);
461
462 pa_xfree(s->name);
463 pa_xfree(s->driver);
464
465 if (s->proplist)
466 pa_proplist_free(s->proplist);
467
468 pa_xfree(s);
469 }
470
471 /* Called from main context */
472 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
473 pa_sink_assert_ref(s);
474
475 s->asyncmsgq = q;
476
477 if (s->monitor_source)
478 pa_source_set_asyncmsgq(s->monitor_source, q);
479 }
480
481 /* Called from main context */
482 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
483 pa_sink_assert_ref(s);
484
485 s->rtpoll = p;
486 if (s->monitor_source)
487 pa_source_set_rtpoll(s->monitor_source, p);
488 }
489
490 /* Called from main context */
491 int pa_sink_update_status(pa_sink*s) {
492 pa_sink_assert_ref(s);
493 pa_assert(PA_SINK_IS_LINKED(s->state));
494
495 if (s->state == PA_SINK_SUSPENDED)
496 return 0;
497
498 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
499 }
500
501 /* Called from main context */
502 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend) {
503 pa_sink_assert_ref(s);
504 pa_assert(PA_SINK_IS_LINKED(s->state));
505
506 if (suspend)
507 return sink_set_state(s, PA_SINK_SUSPENDED);
508 else
509 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
510 }
511
512 /* Called from main context */
513 pa_queue *pa_sink_move_all_start(pa_sink *s) {
514 pa_queue *q;
515 pa_sink_input *i, *n;
516 uint32_t idx;
517
518 pa_sink_assert_ref(s);
519 pa_assert(PA_SINK_IS_LINKED(s->state));
520
521 q = pa_queue_new();
522
523 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
524 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
525
526 pa_sink_input_ref(i);
527
528 if (pa_sink_input_start_move(i) >= 0)
529 pa_queue_push(q, i);
530 else
531 pa_sink_input_unref(i);
532 }
533
534 return q;
535 }
536
537 /* Called from main context */
538 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
539 pa_sink_input *i;
540
541 pa_sink_assert_ref(s);
542 pa_assert(PA_SINK_IS_LINKED(s->state));
543 pa_assert(q);
544
545 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
546 if (pa_sink_input_finish_move(i, s, save) < 0)
547 pa_sink_input_kill(i);
548
549 pa_sink_input_unref(i);
550 }
551
552 pa_queue_free(q, NULL, NULL);
553 }
554
555 /* Called from main context */
556 void pa_sink_move_all_fail(pa_queue *q) {
557 pa_sink_input *i;
558 pa_assert(q);
559
560 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
561 if (pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_MOVE_FAIL], i) == PA_HOOK_OK) {
562 pa_sink_input_kill(i);
563 pa_sink_input_unref(i);
564 }
565 }
566
567 pa_queue_free(q, NULL, NULL);
568 }
569
570 /* Called from IO thread context */
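/* Execute a previously requested rewind: clear the pending request,
 * forward the rewind of nbytes bytes to every connected sink input and,
 * if there really is something to rewind, to the monitor source too. */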
571 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
572 pa_sink_input *i;
573 void *state = NULL;
574 pa_sink_assert_ref(s);
575 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
576
577     /* If nobody requested this and this is not actually a real rewind,
578      * then we can short-cut it */
579 if (!s->thread_info.rewind_requested && nbytes <= 0)
580 return;
581
582 s->thread_info.rewind_nbytes = 0;
583 s->thread_info.rewind_requested = FALSE;
584
585 if (s->thread_info.state == PA_SINK_SUSPENDED)
586 return;
587
588 if (nbytes > 0)
589 pa_log_debug("Processing rewind...");
590
591 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
592 pa_sink_input_assert_ref(i);
593 pa_sink_input_process_rewind(i, nbytes);
594 }
595
596 if (nbytes > 0)
597 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
598 pa_source_process_rewind(s->monitor_source, nbytes);
599 }
600
601 /* Called from IO thread context */
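/* Peek one chunk from each connected input (at most maxinfo of them) via
 * pa_sink_input_peek(), skip inputs whose chunk is pure silence, shrink
 * *length down to the shortest chunk seen and return the number of info[]
 * entries that were filled in. */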
602 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
603 pa_sink_input *i;
604 unsigned n = 0;
605 void *state = NULL;
606 size_t mixlength = *length;
607
608 pa_sink_assert_ref(s);
609 pa_assert(info);
610
611 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
612 pa_sink_input_assert_ref(i);
613
614 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
615
616 if (mixlength == 0 || info->chunk.length < mixlength)
617 mixlength = info->chunk.length;
618
619 if (pa_memblock_is_silence(info->chunk.memblock)) {
620 pa_memblock_unref(info->chunk.memblock);
621 continue;
622 }
623
624 info->userdata = pa_sink_input_ref(i);
625
626 pa_assert(info->chunk.memblock);
627 pa_assert(info->chunk.length > 0);
628
629 info++;
630 n++;
631 maxinfo--;
632 }
633
634 if (mixlength > 0)
635 *length = mixlength;
636
637 return n;
638 }
639
640 /* Called from IO thread context */
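/* Post-mix bookkeeping: advance every connected input by result->length
 * bytes, feed the inputs' direct outputs on the monitor source (with the
 * input's own data or with silence), release the references and memblocks
 * that fill_mix_info() took, and finally post the mixed result to the
 * monitor source. */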
641 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
642 pa_sink_input *i;
643 void *state = NULL;
644 unsigned p = 0;
645 unsigned n_unreffed = 0;
646
647 pa_sink_assert_ref(s);
648 pa_assert(result);
649 pa_assert(result->memblock);
650 pa_assert(result->length > 0);
651
652 /* We optimize for the case where the order of the inputs has not changed */
653
654 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
655 unsigned j;
656 pa_mix_info* m = NULL;
657
658 pa_sink_input_assert_ref(i);
659
660         /* Let's try to find the matching entry in the pa_mix_info array */
661 for (j = 0; j < n; j ++) {
662
663 if (info[p].userdata == i) {
664 m = info + p;
665 break;
666 }
667
668 p++;
669 if (p >= n)
670 p = 0;
671 }
672
673 /* Drop read data */
674 pa_sink_input_drop(i, result->length);
675
676 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
677
678 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
679 void *ostate = NULL;
680 pa_source_output *o;
681 pa_memchunk c;
682
683 if (m && m->chunk.memblock) {
684 c = m->chunk;
685 pa_memblock_ref(c.memblock);
686 pa_assert(result->length <= c.length);
687 c.length = result->length;
688
689 pa_memchunk_make_writable(&c, 0);
690 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
691 } else {
692 c = s->silence;
693 pa_memblock_ref(c.memblock);
694 pa_assert(result->length <= c.length);
695 c.length = result->length;
696 }
697
698 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
699 pa_source_output_assert_ref(o);
700 pa_assert(o->direct_on_input == i);
701 pa_source_post_direct(s->monitor_source, o, &c);
702 }
703
704 pa_memblock_unref(c.memblock);
705 }
706 }
707
708 if (m) {
709 if (m->chunk.memblock)
710 pa_memblock_unref(m->chunk.memblock);
711 pa_memchunk_reset(&m->chunk);
712
713 pa_sink_input_unref(m->userdata);
714 m->userdata = NULL;
715
716 n_unreffed += 1;
717 }
718 }
719
720 /* Now drop references to entries that are included in the
721 * pa_mix_info array but don't exist anymore */
722
723 if (n_unreffed < n) {
724 for (; n > 0; info++, n--) {
725 if (info->userdata)
726 pa_sink_input_unref(info->userdata);
727 if (info->chunk.memblock)
728 pa_memblock_unref(info->chunk.memblock);
729 }
730 }
731
732 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
733 pa_source_post(s->monitor_source, result);
734 }
735
736 /* Called from IO thread context */
737 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
738 pa_mix_info info[MAX_MIX_CHANNELS];
739 unsigned n;
740 size_t block_size_max;
741
742 pa_sink_assert_ref(s);
743 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
744 pa_assert(pa_frame_aligned(length, &s->sample_spec));
745 pa_assert(result);
746
747 pa_sink_ref(s);
748
749 pa_assert(!s->thread_info.rewind_requested);
750 pa_assert(s->thread_info.rewind_nbytes == 0);
751
752 if (s->thread_info.state == PA_SINK_SUSPENDED) {
753 result->memblock = pa_memblock_ref(s->silence.memblock);
754 result->index = s->silence.index;
755 result->length = PA_MIN(s->silence.length, length);
756 return;
757 }
758
759 if (length <= 0)
760 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
761
762 block_size_max = pa_mempool_block_size_max(s->core->mempool);
763 if (length > block_size_max)
764 length = pa_frame_align(block_size_max, &s->sample_spec);
765
766 pa_assert(length > 0);
767
768 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
769
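    /* Three cases: no live input, so hand out the cached silence chunk;
     * exactly one input, so reuse its chunk (copied only if volume or mute
     * has to be applied); or several inputs, which are mixed into a freshly
     * allocated memblock. */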
770 if (n == 0) {
771
772 *result = s->silence;
773 pa_memblock_ref(result->memblock);
774
775 if (result->length > length)
776 result->length = length;
777
778 } else if (n == 1) {
779 pa_cvolume volume;
780
781 *result = info[0].chunk;
782 pa_memblock_ref(result->memblock);
783
784 if (result->length > length)
785 result->length = length;
786
787 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
788
789 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
790 pa_memchunk_make_writable(result, 0);
791 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
792 pa_silence_memchunk(result, &s->sample_spec);
793 else
794 pa_volume_memchunk(result, &s->sample_spec, &volume);
795 }
796 } else {
797 void *ptr;
798 result->memblock = pa_memblock_new(s->core->mempool, length);
799
800 ptr = pa_memblock_acquire(result->memblock);
801 result->length = pa_mix(info, n,
802 ptr, length,
803 &s->sample_spec,
804 &s->thread_info.soft_volume,
805 s->thread_info.soft_muted);
806 pa_memblock_release(result->memblock);
807
808 result->index = 0;
809 }
810
811 inputs_drop(s, info, n, result);
812
813 pa_sink_unref(s);
814 }
815
816 /* Called from IO thread context */
817 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
818 pa_mix_info info[MAX_MIX_CHANNELS];
819 unsigned n;
820 size_t length, block_size_max;
821
822 pa_sink_assert_ref(s);
823 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
824 pa_assert(target);
825 pa_assert(target->memblock);
826 pa_assert(target->length > 0);
827 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
828
829 pa_sink_ref(s);
830
831 pa_assert(!s->thread_info.rewind_requested);
832 pa_assert(s->thread_info.rewind_nbytes == 0);
833
834 if (s->thread_info.state == PA_SINK_SUSPENDED) {
835 pa_silence_memchunk(target, &s->sample_spec);
836 return;
837 }
838
839 length = target->length;
840 block_size_max = pa_mempool_block_size_max(s->core->mempool);
841 if (length > block_size_max)
842 length = pa_frame_align(block_size_max, &s->sample_spec);
843
844 pa_assert(length > 0);
845
846 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
847
848 if (n == 0) {
849 if (target->length > length)
850 target->length = length;
851
852 pa_silence_memchunk(target, &s->sample_spec);
853 } else if (n == 1) {
854 pa_cvolume volume;
855
856 if (target->length > length)
857 target->length = length;
858
859 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
860
861 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
862 pa_silence_memchunk(target, &s->sample_spec);
863 else {
864 pa_memchunk vchunk;
865
866 vchunk = info[0].chunk;
867 pa_memblock_ref(vchunk.memblock);
868
869 if (vchunk.length > length)
870 vchunk.length = length;
871
872 if (!pa_cvolume_is_norm(&volume)) {
873 pa_memchunk_make_writable(&vchunk, 0);
874 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
875 }
876
877 pa_memchunk_memcpy(target, &vchunk);
878 pa_memblock_unref(vchunk.memblock);
879 }
880
881 } else {
882 void *ptr;
883
884 ptr = pa_memblock_acquire(target->memblock);
885
886 target->length = pa_mix(info, n,
887 (uint8_t*) ptr + target->index, length,
888 &s->sample_spec,
889 &s->thread_info.soft_volume,
890 s->thread_info.soft_muted);
891
892 pa_memblock_release(target->memblock);
893 }
894
895 inputs_drop(s, info, n, target);
896
897 pa_sink_unref(s);
898 }
899
900 /* Called from IO thread context */
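/* Like pa_sink_render_into(), but guarantees that the whole of *target is
 * filled, calling pa_sink_render_into() repeatedly since a single call may
 * render less than requested. */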
901 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
902 pa_memchunk chunk;
903 size_t l, d;
904
905 pa_sink_assert_ref(s);
906 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
907 pa_assert(target);
908 pa_assert(target->memblock);
909 pa_assert(target->length > 0);
910 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
911
912 pa_sink_ref(s);
913
914 pa_assert(!s->thread_info.rewind_requested);
915 pa_assert(s->thread_info.rewind_nbytes == 0);
916
917 l = target->length;
918 d = 0;
919 while (l > 0) {
920 chunk = *target;
921 chunk.index += d;
922 chunk.length -= d;
923
924 pa_sink_render_into(s, &chunk);
925
926 d += chunk.length;
927 l -= chunk.length;
928 }
929
930 pa_sink_unref(s);
931 }
932
933 /* Called from IO thread context */
934 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
935 pa_mix_info info[MAX_MIX_CHANNELS];
936 size_t length1st = length;
937 unsigned n;
938
939 pa_sink_assert_ref(s);
940 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
941 pa_assert(length > 0);
942 pa_assert(pa_frame_aligned(length, &s->sample_spec));
943 pa_assert(result);
944
945 pa_sink_ref(s);
946
947 pa_assert(!s->thread_info.rewind_requested);
948 pa_assert(s->thread_info.rewind_nbytes == 0);
949
950 pa_assert(length > 0);
951
952 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
953
954 if (n == 0) {
955 pa_silence_memchunk_get(&s->core->silence_cache,
956 s->core->mempool,
957 result,
958 &s->sample_spec,
959 length1st);
960 } else if (n == 1) {
961 pa_cvolume volume;
962
963 *result = info[0].chunk;
964 pa_memblock_ref(result->memblock);
965
966 if (result->length > length)
967 result->length = length;
968
969 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
970
971 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
972 pa_memchunk_make_writable(result, length);
973 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
974 pa_silence_memchunk(result, &s->sample_spec);
975 else
976 pa_volume_memchunk(result, &s->sample_spec, &volume);
977 }
978 } else {
979 void *ptr;
980
981 result->index = 0;
982 result->memblock = pa_memblock_new(s->core->mempool, length);
983
984 ptr = pa_memblock_acquire(result->memblock);
985
986 result->length = pa_mix(info, n,
987 (uint8_t*) ptr + result->index, length1st,
988 &s->sample_spec,
989 &s->thread_info.soft_volume,
990 s->thread_info.soft_muted);
991
992 pa_memblock_release(result->memblock);
993 }
994
995 inputs_drop(s, info, n, result);
996
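    /* If the first pass produced less than the requested length, the block
     * below is meant to top up the remainder with pa_sink_render_into(),
     * analogous to pa_sink_render_into_full(). */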
997 if (result->length < length) {
998 pa_memchunk chunk;
999 size_t l, d;
1000 pa_memchunk_make_writable(result, length);
1001 result->length = length;
1002
1003 l = length - result->length;
1004 d = result->index + result->length;
1005 while (l > 0) {
1006 chunk = *result;
1007 chunk.index += d;
1008 chunk.length -= d - result->index;
1009
1010 pa_sink_render_into(s, &chunk);
1011
1012 d += chunk.length;
1013 l -= chunk.length;
1014 }
1015 result->length = length;
1016 }
1017
1018 pa_sink_unref(s);
1019 }
1020
1021 /* Called from main thread */
1022 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1023 pa_usec_t usec = 0;
1024
1025 pa_sink_assert_ref(s);
1026 pa_assert(PA_SINK_IS_LINKED(s->state));
1027
1028 /* The returned value is supposed to be in the time domain of the sound card! */
1029
1030 if (s->state == PA_SINK_SUSPENDED)
1031 return 0;
1032
1033 if (!(s->flags & PA_SINK_LATENCY))
1034 return 0;
1035
1036 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1037
1038 return usec;
1039 }
1040
1041 /* Called from IO thread */
1042 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1043 pa_usec_t usec = 0;
1044 pa_msgobject *o;
1045
1046 pa_sink_assert_ref(s);
1047 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1048
1049 /* The returned value is supposed to be in the time domain of the sound card! */
1050
1051 if (s->thread_info.state == PA_SINK_SUSPENDED)
1052 return 0;
1053
1054 if (!(s->flags & PA_SINK_LATENCY))
1055 return 0;
1056
1057 o = PA_MSGOBJECT(s);
1058
1059 /* We probably should make this a proper vtable callback instead of going through process_msg() */
1060
1061 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1062 return -1;
1063
1064 return usec;
1065 }
1066
1067 static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
1068 unsigned c;
1069
1070 pa_sink_input_assert_ref(i);
1071 pa_assert(new_volume->channels == i->sample_spec.channels);
1072
1073 /*
1074 * This basically calculates:
1075 *
1076 * i->relative_volume := i->virtual_volume / new_volume
1077 * i->soft_volume := i->relative_volume * i->volume_factor
1078 */
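    /* For example, on a linear scale: a virtual_volume of 0.5 against a new
     * sink volume of 1.0 yields a relative_volume of 0.5; with a
     * volume_factor of 1.0 the resulting soft_volume is 0.5 as well. */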
1079
1080 /* The new sink volume passed in here must already be remapped to
1081 * the sink input's channel map! */
1082
1083 i->soft_volume.channels = i->sample_spec.channels;
1084
1085 for (c = 0; c < i->sample_spec.channels; c++)
1086
1087 if (new_volume->values[c] <= PA_VOLUME_MUTED)
1088 /* We leave i->relative_volume untouched */
1089 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1090 else {
1091 i->relative_volume[c] =
1092 pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
1093 pa_sw_volume_to_linear(new_volume->values[c]);
1094
1095 i->soft_volume.values[c] = pa_sw_volume_from_linear(
1096 i->relative_volume[c] *
1097 pa_sw_volume_to_linear(i->volume_factor.values[c]));
1098 }
1099
1100 /* Hooks have the ability to play games with i->soft_volume */
1101 pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);
1102
1103 /* We don't copy the soft_volume to the thread_info data
1104 * here. That must be done by the caller */
1105 }
1106
1107 /* Called from main thread */
1108 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1109 pa_sink_input *i;
1110 uint32_t idx;
1111
1112 pa_sink_assert_ref(s);
1113 pa_assert(new_volume);
1114 pa_assert(PA_SINK_IS_LINKED(s->state));
1115 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1116
1117 /* This is called whenever a sink input volume changes or a sink
1118 * input is added/removed and we might need to fix up the sink
1119 * volume accordingly. Please note that we don't actually update
1120      * the sink's volume here, we only return how it needs to be
1121      * updated. The caller should then call pa_sink_set_volume(). */
1122
1123 if (pa_idxset_isempty(s->inputs)) {
1124 /* In the special case that we have no sink input we leave the
1125 * volume unmodified. */
1126 *new_volume = s->reference_volume;
1127 return;
1128 }
1129
1130 pa_cvolume_mute(new_volume, s->channel_map.channels);
1131
1132 /* First let's determine the new maximum volume of all inputs
1133 * connected to this sink */
1134 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1135 unsigned c;
1136 pa_cvolume remapped_volume;
1137
1138 remapped_volume = i->virtual_volume;
1139 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1140
1141 for (c = 0; c < new_volume->channels; c++)
1142 if (remapped_volume.values[c] > new_volume->values[c])
1143 new_volume->values[c] = remapped_volume.values[c];
1144 }
1145
1146 /* Then, let's update the soft volumes of all inputs connected
1147 * to this sink */
1148 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1149 pa_cvolume remapped_new_volume;
1150
1151 remapped_new_volume = *new_volume;
1152 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1153 compute_new_soft_volume(i, &remapped_new_volume);
1154
1155 /* We don't copy soft_volume to the thread_info data here
1156 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1157              * want the update to be atomic with the sink volume
1158 * update, hence we do it within the pa_sink_set_volume() call
1159 * below */
1160 }
1161 }
1162
1163 /* Called from main thread */
1164 void pa_sink_propagate_flat_volume(pa_sink *s) {
1165 pa_sink_input *i;
1166 uint32_t idx;
1167
1168 pa_sink_assert_ref(s);
1169 pa_assert(PA_SINK_IS_LINKED(s->state));
1170 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1171
1172     /* This is called whenever the sink volume changes for a reason
1173      * other than a sink input volume change. We need to fix up the
1174 * sink input volumes accordingly */
1175
1176 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1177 pa_cvolume sink_volume, new_virtual_volume;
1178 unsigned c;
1179
1180 /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */
1181
1182 sink_volume = s->virtual_volume;
1183 pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);
1184
1185 for (c = 0; c < i->sample_spec.channels; c++)
1186 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1187 i->relative_volume[c] *
1188 pa_sw_volume_to_linear(sink_volume.values[c]));
1189
1190 new_virtual_volume.channels = i->sample_spec.channels;
1191
1192 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1193 i->virtual_volume = new_virtual_volume;
1194
1195 /* Hmm, the soft volume might no longer actually match
1196              * what has been chosen as the new virtual volume here,
1197 * especially when the old volume was
1198 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1199 * volumes here. */
1200 compute_new_soft_volume(i, &sink_volume);
1201
1202 /* The virtual volume changed, let's tell people so */
1203 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1204 }
1205 }
1206
1207 /* If the soft_volume of any of the sink inputs got changed, let's
1208 * make sure the thread copies are synced up. */
1209 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
1210 }
1211
1212 /* Called from main thread */
1213 void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference) {
1214 pa_bool_t virtual_volume_changed;
1215
1216 pa_sink_assert_ref(s);
1217 pa_assert(PA_SINK_IS_LINKED(s->state));
1218 pa_assert(volume);
1219 pa_assert(pa_cvolume_valid(volume));
1220 pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));
1221
1222 virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
1223 s->virtual_volume = *volume;
1224
1225 if (become_reference)
1226 s->reference_volume = s->virtual_volume;
1227
1228 /* Propagate this volume change back to the inputs */
1229 if (virtual_volume_changed)
1230 if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
1231 pa_sink_propagate_flat_volume(s);
1232
1233 if (s->set_volume) {
1234 /* If we have a function set_volume(), then we do not apply a
1235 * soft volume by default. However, set_volume() is free to
1236 * apply one to s->soft_volume */
1237
1238 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1239 s->set_volume(s);
1240
1241 } else
1242 /* If we have no function set_volume(), then the soft volume
1243 * becomes the virtual volume */
1244 s->soft_volume = s->virtual_volume;
1245
1246 /* This tells the sink that soft and/or virtual volume changed */
1247 if (sendmsg)
1248 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1249
1250 if (virtual_volume_changed)
1251 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1252 }
1253
1254 /* Called from main thread. Only to be called by sink implementor */
1255 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1256 pa_sink_assert_ref(s);
1257 pa_assert(volume);
1258
1259 s->soft_volume = *volume;
1260
1261 if (PA_SINK_IS_LINKED(s->state))
1262 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1263 else
1264 s->thread_info.soft_volume = *volume;
1265 }
1266
1267 /* Called from main thread */
1268 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
1269 pa_sink_assert_ref(s);
1270
1271 if (s->refresh_volume || force_refresh) {
1272 struct pa_cvolume old_virtual_volume = s->virtual_volume;
1273
1274 if (s->get_volume)
1275 s->get_volume(s);
1276
1277 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1278
1279 if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {
1280
1281 s->reference_volume = s->virtual_volume;
1282
1283 if (s->flags & PA_SINK_FLAT_VOLUME)
1284 pa_sink_propagate_flat_volume(s);
1285
1286 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1287 }
1288 }
1289
1290 return reference ? &s->reference_volume : &s->virtual_volume;
1291 }
1292
1293 /* Called from main thread */
1294 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume) {
1295 pa_sink_assert_ref(s);
1296
1297 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1298
1299 if (pa_cvolume_equal(&s->virtual_volume, new_volume))
1300 return;
1301
1302 s->reference_volume = s->virtual_volume = *new_volume;
1303
1304 if (s->flags & PA_SINK_FLAT_VOLUME)
1305 pa_sink_propagate_flat_volume(s);
1306
1307 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1308 }
1309
1310 /* Called from main thread */
1311 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute) {
1312 pa_bool_t old_muted;
1313
1314 pa_sink_assert_ref(s);
1315 pa_assert(PA_SINK_IS_LINKED(s->state));
1316
1317 old_muted = s->muted;
1318 s->muted = mute;
1319
1320 if (s->set_mute)
1321 s->set_mute(s);
1322
1323 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1324
1325 if (old_muted != s->muted)
1326 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1327 }
1328
1329 /* Called from main thread */
1330 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1331
1332 pa_sink_assert_ref(s);
1333
1334 if (s->refresh_muted || force_refresh) {
1335 pa_bool_t old_muted = s->muted;
1336
1337 if (s->get_mute)
1338 s->get_mute(s);
1339
1340 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1341
1342 if (old_muted != s->muted)
1343 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1344 }
1345
1346 return s->muted;
1347 }
1348
1349 /* Called from main thread */
1350 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1351 pa_sink_assert_ref(s);
1352
1353     /* The sink implementor may call this if the mute status changed to make sure everyone is notified */
1354
1355 if (s->muted == new_muted)
1356 return;
1357
1358 s->muted = new_muted;
1359 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1360 }
1361
1362 /* Called from main thread */
1363 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1364 pa_sink_assert_ref(s);
1365
1366 if (p)
1367 pa_proplist_update(s->proplist, mode, p);
1368
1369 if (PA_SINK_IS_LINKED(s->state)) {
1370 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1371 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1372 }
1373
1374 return TRUE;
1375 }
1376
1377 /* Called from main thread */
1378 void pa_sink_set_description(pa_sink *s, const char *description) {
1379 const char *old;
1380 pa_sink_assert_ref(s);
1381
1382 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1383 return;
1384
1385 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1386
1387 if (old && description && !strcmp(old, description))
1388 return;
1389
1390 if (description)
1391 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1392 else
1393 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1394
1395 if (s->monitor_source) {
1396 char *n;
1397
1398 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1399 pa_source_set_description(s->monitor_source, n);
1400 pa_xfree(n);
1401 }
1402
1403 if (PA_SINK_IS_LINKED(s->state)) {
1404 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1405 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1406 }
1407 }
1408
1409 /* Called from main thread */
1410 unsigned pa_sink_linked_by(pa_sink *s) {
1411 unsigned ret;
1412
1413 pa_sink_assert_ref(s);
1414 pa_assert(PA_SINK_IS_LINKED(s->state));
1415
1416 ret = pa_idxset_size(s->inputs);
1417
1418 /* We add in the number of streams connected to us here. Please
1419      * note the asymmetry to pa_sink_used_by()! */
1420
1421 if (s->monitor_source)
1422 ret += pa_source_linked_by(s->monitor_source);
1423
1424 return ret;
1425 }
1426
1427 /* Called from main thread */
1428 unsigned pa_sink_used_by(pa_sink *s) {
1429 unsigned ret;
1430
1431 pa_sink_assert_ref(s);
1432 pa_assert(PA_SINK_IS_LINKED(s->state));
1433
1434 ret = pa_idxset_size(s->inputs);
1435 pa_assert(ret >= s->n_corked);
1436
1437 /* Streams connected to our monitor source do not matter for
1438      * pa_sink_used_by()! */
1439
1440 return ret - s->n_corked;
1441 }
1442
1443 /* Called from main thread */
1444 unsigned pa_sink_check_suspend(pa_sink *s) {
1445 unsigned ret;
1446 pa_sink_input *i;
1447 uint32_t idx;
1448
1449 pa_sink_assert_ref(s);
1450
1451 if (!PA_SINK_IS_LINKED(s->state))
1452 return 0;
1453
1454 ret = 0;
1455
1456 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1457 pa_sink_input_state_t st;
1458
1459 st = pa_sink_input_get_state(i);
1460 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1461
1462 if (st == PA_SINK_INPUT_CORKED)
1463 continue;
1464
1465 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1466 continue;
1467
1468 ret ++;
1469 }
1470
1471 if (s->monitor_source)
1472 ret += pa_source_check_suspend(s->monitor_source);
1473
1474 return ret;
1475 }
1476
1477 /* Called from the IO thread */
1478 static void sync_input_volumes_within_thread(pa_sink *s) {
1479 pa_sink_input *i;
1480 void *state = NULL;
1481
1482 pa_sink_assert_ref(s);
1483
1484 while ((i = PA_SINK_INPUT(pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))) {
1485 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1486 continue;
1487
1488 i->thread_info.soft_volume = i->soft_volume;
1489 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1490 }
1491 }
1492
1493 /* Called from IO thread, except when it is not */
1494 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1495 pa_sink *s = PA_SINK(o);
1496 pa_sink_assert_ref(s);
1497
1498 switch ((pa_sink_message_t) code) {
1499
1500 case PA_SINK_MESSAGE_ADD_INPUT: {
1501 pa_sink_input *i = PA_SINK_INPUT(userdata);
1502
1503 /* If you change anything here, make sure to change the
1504 * sink input handling a few lines down at
1505 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1506
1507 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1508
1509 /* Since the caller sleeps in pa_sink_input_put(), we can
1510 * safely access data outside of thread_info even though
1511 * it is mutable */
1512
1513 if ((i->thread_info.sync_prev = i->sync_prev)) {
1514 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1515 pa_assert(i->sync_prev->sync_next == i);
1516 i->thread_info.sync_prev->thread_info.sync_next = i;
1517 }
1518
1519 if ((i->thread_info.sync_next = i->sync_next)) {
1520 pa_assert(i->sink == i->thread_info.sync_next->sink);
1521 pa_assert(i->sync_next->sync_prev == i);
1522 i->thread_info.sync_next->thread_info.sync_prev = i;
1523 }
1524
1525 pa_assert(!i->thread_info.attached);
1526 i->thread_info.attached = TRUE;
1527
1528 if (i->attach)
1529 i->attach(i);
1530
1531 pa_sink_input_set_state_within_thread(i, i->state);
1532
1533 /* The requested latency of the sink input needs to be
1534 * fixed up and then configured on the sink */
1535
1536 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1537 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1538
1539 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1540 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1541
1542 /* We don't rewind here automatically. This is left to the
1543 * sink input implementor because some sink inputs need a
1544 * slow start, i.e. need some time to buffer client
1545 * samples before beginning streaming. */
1546
1547 /* In flat volume mode we need to update the volume as
1548 * well */
1549 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1550 }
1551
1552 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1553 pa_sink_input *i = PA_SINK_INPUT(userdata);
1554
1555 /* If you change anything here, make sure to change the
1556 * sink input handling a few lines down at
1557              * PA_SINK_MESSAGE_START_MOVE, too. */
1558
1559 if (i->detach)
1560 i->detach(i);
1561
1562 pa_sink_input_set_state_within_thread(i, i->state);
1563
1564 pa_assert(i->thread_info.attached);
1565 i->thread_info.attached = FALSE;
1566
1567 /* Since the caller sleeps in pa_sink_input_unlink(),
1568 * we can safely access data outside of thread_info even
1569 * though it is mutable */
1570
1571 pa_assert(!i->sync_prev);
1572 pa_assert(!i->sync_next);
1573
1574 if (i->thread_info.sync_prev) {
1575 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1576 i->thread_info.sync_prev = NULL;
1577 }
1578
1579 if (i->thread_info.sync_next) {
1580 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1581 i->thread_info.sync_next = NULL;
1582 }
1583
1584 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1585 pa_sink_input_unref(i);
1586
1587 pa_sink_invalidate_requested_latency(s);
1588 pa_sink_request_rewind(s, (size_t) -1);
1589
1590 /* In flat volume mode we need to update the volume as
1591 * well */
1592 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1593 }
1594
1595 case PA_SINK_MESSAGE_START_MOVE: {
1596 pa_sink_input *i = PA_SINK_INPUT(userdata);
1597
1598 /* We don't support moving synchronized streams. */
1599 pa_assert(!i->sync_prev);
1600 pa_assert(!i->sync_next);
1601 pa_assert(!i->thread_info.sync_next);
1602 pa_assert(!i->thread_info.sync_prev);
1603
1604 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1605 pa_usec_t usec = 0;
1606 size_t sink_nbytes, total_nbytes;
1607
1608 /* Get the latency of the sink */
1609 if (!(s->flags & PA_SINK_LATENCY) ||
1610 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1611 usec = 0;
1612
1613 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1614 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1615
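                /* Everything still buffered in the sink plus everything
                 * already sitting in the stream's render queue will have to
                 * be rewritten after the move, so translate that byte count
                 * back to stream-side bytes via the resampler, if there is
                 * one. */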
1616 if (total_nbytes > 0) {
1617 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1618 i->thread_info.rewrite_flush = TRUE;
1619 pa_sink_input_process_rewind(i, sink_nbytes);
1620 }
1621 }
1622
1623 if (i->detach)
1624 i->detach(i);
1625
1626 pa_assert(i->thread_info.attached);
1627 i->thread_info.attached = FALSE;
1628
1629 /* Let's remove the sink input ...*/
1630 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1631 pa_sink_input_unref(i);
1632
1633 pa_sink_invalidate_requested_latency(s);
1634
1635 pa_log_debug("Requesting rewind due to started move");
1636 pa_sink_request_rewind(s, (size_t) -1);
1637
1638 /* In flat volume mode we need to update the volume as
1639 * well */
1640 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1641 }
1642
1643 case PA_SINK_MESSAGE_FINISH_MOVE: {
1644 pa_sink_input *i = PA_SINK_INPUT(userdata);
1645
1646 /* We don't support moving synchronized streams. */
1647 pa_assert(!i->sync_prev);
1648 pa_assert(!i->sync_next);
1649 pa_assert(!i->thread_info.sync_next);
1650 pa_assert(!i->thread_info.sync_prev);
1651
1652 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1653
1654 pa_assert(!i->thread_info.attached);
1655 i->thread_info.attached = TRUE;
1656
1657 if (i->attach)
1658 i->attach(i);
1659
1660 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1661 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1662
1663 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1664 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1665
1666 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1667 pa_usec_t usec = 0;
1668 size_t nbytes;
1669
1670 /* Get the latency of the sink */
1671 if (!(s->flags & PA_SINK_LATENCY) ||
1672 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1673 usec = 0;
1674
1675 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1676
1677 if (nbytes > 0)
1678 pa_sink_input_drop(i, nbytes);
1679
1680 pa_log_debug("Requesting rewind due to finished move");
1681 pa_sink_request_rewind(s, nbytes);
1682 }
1683
1684 /* In flat volume mode we need to update the volume as
1685 * well */
1686 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1687 }
1688
1689 case PA_SINK_MESSAGE_SET_VOLUME:
1690
1691 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1692 s->thread_info.soft_volume = s->soft_volume;
1693 pa_sink_request_rewind(s, (size_t) -1);
1694 }
1695
1696 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1697 return 0;
1698
1699 /* Fall through ... */
1700
1701 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1702 sync_input_volumes_within_thread(s);
1703 return 0;
1704
1705 case PA_SINK_MESSAGE_GET_VOLUME:
1706 return 0;
1707
1708 case PA_SINK_MESSAGE_SET_MUTE:
1709
1710 if (s->thread_info.soft_muted != s->muted) {
1711 s->thread_info.soft_muted = s->muted;
1712 pa_sink_request_rewind(s, (size_t) -1);
1713 }
1714
1715 return 0;
1716
1717 case PA_SINK_MESSAGE_GET_MUTE:
1718 return 0;
1719
1720 case PA_SINK_MESSAGE_SET_STATE: {
1721
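            /* Detect whether we are crossing into or out of the SUSPENDED
             * state, so that the inputs can be notified below. */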
1722 pa_bool_t suspend_change =
1723 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1724 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1725
1726 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1727
1728 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1729 s->thread_info.rewind_nbytes = 0;
1730 s->thread_info.rewind_requested = FALSE;
1731 }
1732
1733 if (suspend_change) {
1734 pa_sink_input *i;
1735 void *state = NULL;
1736
1737 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1738 if (i->suspend_within_thread)
1739 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1740 }
1741
1742 return 0;
1743 }
1744
1745 case PA_SINK_MESSAGE_DETACH:
1746
1747 /* Detach all streams */
1748 pa_sink_detach_within_thread(s);
1749 return 0;
1750
1751 case PA_SINK_MESSAGE_ATTACH:
1752
1753 /* Reattach all streams */
1754 pa_sink_attach_within_thread(s);
1755 return 0;
1756
1757 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
1758
1759 pa_usec_t *usec = userdata;
1760 *usec = pa_sink_get_requested_latency_within_thread(s);
1761
1762 if (*usec == (pa_usec_t) -1)
1763 *usec = s->thread_info.max_latency;
1764
1765 return 0;
1766 }
1767
1768 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
1769 pa_usec_t *r = userdata;
1770
1771 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
1772
1773 return 0;
1774 }
1775
1776 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
1777 pa_usec_t *r = userdata;
1778
1779 r[0] = s->thread_info.min_latency;
1780 r[1] = s->thread_info.max_latency;
1781
1782 return 0;
1783 }
1784
1785 case PA_SINK_MESSAGE_GET_MAX_REWIND:
1786
1787 *((size_t*) userdata) = s->thread_info.max_rewind;
1788 return 0;
1789
1790 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
1791
1792 *((size_t*) userdata) = s->thread_info.max_request;
1793 return 0;
1794
1795 case PA_SINK_MESSAGE_SET_MAX_REWIND:
1796
1797 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
1798 return 0;
1799
1800 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
1801
1802 pa_sink_set_max_request_within_thread(s, (size_t) offset);
1803 return 0;
1804
1805 case PA_SINK_MESSAGE_GET_LATENCY:
1806 case PA_SINK_MESSAGE_MAX:
1807 ;
1808 }
1809
1810 return -1;
1811 }
1812
1813 /* Called from main thread */
1814 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend) {
1815 pa_sink *sink;
1816 uint32_t idx;
1817 int ret = 0;
1818
1819 pa_core_assert_ref(c);
1820
1821 for (sink = PA_SINK(pa_idxset_first(c->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(c->sinks, &idx))) {
1822 int r;
1823
1824 if ((r = pa_sink_suspend(sink, suspend)) < 0)
1825 ret = r;
1826 }
1827
1828 return ret;
1829 }
1830
1831 /* Called from main thread */
1832 void pa_sink_detach(pa_sink *s) {
1833 pa_sink_assert_ref(s);
1834 pa_assert(PA_SINK_IS_LINKED(s->state));
1835
1836 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
1837 }
1838
1839 /* Called from main thread */
1840 void pa_sink_attach(pa_sink *s) {
1841 pa_sink_assert_ref(s);
1842 pa_assert(PA_SINK_IS_LINKED(s->state));
1843
1844 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
1845 }
1846
1847 /* Called from IO thread */
1848 void pa_sink_detach_within_thread(pa_sink *s) {
1849 pa_sink_input *i;
1850 void *state = NULL;
1851
1852 pa_sink_assert_ref(s);
1853 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1854
1855 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1856 if (i->detach)
1857 i->detach(i);
1858
1859 if (s->monitor_source)
1860 pa_source_detach_within_thread(s->monitor_source);
1861 }
1862
1863 /* Called from IO thread */
1864 void pa_sink_attach_within_thread(pa_sink *s) {
1865 pa_sink_input *i;
1866 void *state = NULL;
1867
1868 pa_sink_assert_ref(s);
1869 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1870
1871 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1872 if (i->attach)
1873 i->attach(i);
1874
1875 if (s->monitor_source)
1876 pa_source_attach_within_thread(s->monitor_source);
1877 }
1878
1879 /* Called from IO thread */
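/* Ask the sink to rewind its playback buffer by up to nbytes bytes;
 * (size_t) -1 asks for the maximum. The request is clamped to max_rewind,
 * and if a larger rewind is already pending this call is a no-op. */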
1880 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
1881 pa_sink_assert_ref(s);
1882 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1883
1884 if (s->thread_info.state == PA_SINK_SUSPENDED)
1885 return;
1886
1887 if (nbytes == (size_t) -1)
1888 nbytes = s->thread_info.max_rewind;
1889
1890 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
1891
1892 if (s->thread_info.rewind_requested &&
1893 nbytes <= s->thread_info.rewind_nbytes)
1894 return;
1895
1896 s->thread_info.rewind_nbytes = nbytes;
1897 s->thread_info.rewind_requested = TRUE;
1898
1899 if (s->request_rewind)
1900 s->request_rewind(s);
1901 }
1902
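/*
 * Illustrative sketch, not part of upstream sink.c: after an underrun or a
 * configuration change a sink implementation typically asks for as large a
 * rewind as possible from its IO thread. Passing (size_t) -1 means "as much
 * as possible"; the helper above clamps the value to thread_info.max_rewind.
 * The function name is hypothetical.
 */
static void example_handle_underrun(pa_sink *s) {

    /* Throw away everything that is still buffered downstream and have it
     * rendered again. */
    pa_sink_request_rewind(s, (size_t) -1);
}
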
1903 /* Called from IO thread */
1904 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
1905 pa_usec_t result = (pa_usec_t) -1;
1906 pa_sink_input *i;
1907 void *state = NULL;
1908 pa_usec_t monitor_latency;
1909
1910 pa_sink_assert_ref(s);
1911
1912 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
1913 return PA_CLAMP(s->fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
1914
1915 if (s->thread_info.requested_latency_valid)
1916 return s->thread_info.requested_latency;
1917
1918 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1919
1920 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
1921 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
1922 result = i->thread_info.requested_sink_latency;
1923
1924 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
1925
1926 if (monitor_latency != (pa_usec_t) -1 &&
1927 (result == (pa_usec_t) -1 || result > monitor_latency))
1928 result = monitor_latency;
1929
1930 if (result != (pa_usec_t) -1)
1931 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
1932
1933 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1934 /* Only cache if properly initialized */
1935 s->thread_info.requested_latency = result;
1936 s->thread_info.requested_latency_valid = TRUE;
1937 }
1938
1939 return result;
1940 }
1941
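/*
 * Illustrative sketch, not part of upstream sink.c: a sink with
 * PA_SINK_DYNAMIC_LATENCY usually reevaluates its buffering in its
 * update_requested_latency() callback by querying the value computed above
 * and translating it into bytes. The function name and the 25 ms default are
 * made up; pa_usec_to_bytes() is the public conversion helper from
 * pulse/sample.h.
 */
static void example_update_requested_latency_cb(pa_sink *s) {
    pa_usec_t latency;
    size_t watermark;

    latency = pa_sink_get_requested_latency_within_thread(s);

    if (latency == (pa_usec_t) -1)
        latency = 25 * PA_USEC_PER_MSEC; /* no stream asked for anything specific */

    watermark = pa_usec_to_bytes(latency, &s->sample_spec);

    /* A real driver would now reprogram its wakeup/DMA thresholds so that
     * roughly "watermark" bytes stay buffered. */
    (void) watermark;
}
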
1942 /* Called from main thread */
1943 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
1944 pa_usec_t usec = 0;
1945
1946 pa_sink_assert_ref(s);
1947 pa_assert(PA_SINK_IS_LINKED(s->state));
1948
1949 if (s->state == PA_SINK_SUSPENDED)
1950 return 0;
1951
1952 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
1953 return usec;
1954 }
1955
1956 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
1957 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
1958 pa_sink_input *i;
1959 void *state = NULL;
1960
1961 pa_sink_assert_ref(s);
1962
1963 if (max_rewind == s->thread_info.max_rewind)
1964 return;
1965
1966 s->thread_info.max_rewind = max_rewind;
1967
1968 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1969 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1970 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1971 }
1972
1973 if (s->monitor_source)
1974 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
1975 }
1976
1977 /* Called from main thread */
1978 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
1979 pa_sink_assert_ref(s);
1980
1981 if (PA_SINK_IS_LINKED(s->state))
1982 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
1983 else
1984 pa_sink_set_max_rewind_within_thread(s, max_rewind);
1985 }
1986
1987 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
1988 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
1989 void *state = NULL;
1990
1991 pa_sink_assert_ref(s);
1992
1993 if (max_request == s->thread_info.max_request)
1994 return;
1995
1996 s->thread_info.max_request = max_request;
1997
1998 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1999 pa_sink_input *i;
2000
2001 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2002 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2003 }
2004 }
2005
2006 /* Called from main thread */
2007 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2008 pa_sink_assert_ref(s);
2009
2010 if (PA_SINK_IS_LINKED(s->state))
2011 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2012 else
2013 pa_sink_set_max_request_within_thread(s, max_request);
2014 }
2015
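/*
 * Illustrative sketch, not part of upstream sink.c: when a driver
 * reconfigures its hardware buffer it should let the core know how much
 * audio it may request in one go and how far it is able to rewind, so that
 * the setters above can propagate the new limits to all inputs. The function
 * name and the hwbuf_size parameter are hypothetical.
 */
static void example_report_buffer_limits(pa_sink *s, size_t hwbuf_size) {

    /* At most one hardware buffer worth of audio is requested at once, and
     * at most one hardware buffer worth of audio can be taken back. */
    pa_sink_set_max_request(s, hwbuf_size);
    pa_sink_set_max_rewind(s, hwbuf_size);
}
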
2016 /* Called from IO thread */
2017 void pa_sink_invalidate_requested_latency(pa_sink *s) {
2018 pa_sink_input *i;
2019 void *state = NULL;
2020
2021 pa_sink_assert_ref(s);
2022
2023 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2024 return;
2025
2026 s->thread_info.requested_latency_valid = FALSE;
2027
2028 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2029
2030 if (s->update_requested_latency)
2031 s->update_requested_latency(s);
2032
2033 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2034 if (i->update_sink_requested_latency)
2035 i->update_sink_requested_latency(i);
2036 }
2037 }
2038
2039 /* Called from main thread */
2040 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2041 pa_sink_assert_ref(s);
2042
2043 /* min_latency == 0: no lower limit requested; the absolute minimum is used
2044 * min_latency anything else: the specified lower limit
2045 *
2046 * Similarly for max_latency */
2047
2048 if (min_latency < ABSOLUTE_MIN_LATENCY)
2049 min_latency = ABSOLUTE_MIN_LATENCY;
2050
2051 if (max_latency <= 0 ||
2052 max_latency > ABSOLUTE_MAX_LATENCY)
2053 max_latency = ABSOLUTE_MAX_LATENCY;
2054
2055 pa_assert(min_latency <= max_latency);
2056
2057 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2058 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2059 max_latency == ABSOLUTE_MAX_LATENCY) ||
2060 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2061
2062 if (PA_SINK_IS_LINKED(s->state)) {
2063 pa_usec_t r[2];
2064
2065 r[0] = min_latency;
2066 r[1] = max_latency;
2067
2068 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2069 } else
2070 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2071 }
2072
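/*
 * Illustrative sketch, not part of upstream sink.c: a driver that supports
 * dynamic latency typically declares the range it can handle right after
 * creating the sink (i.e. before pa_sink_put()), derived from its minimal
 * and maximal hardware buffer sizes. The function and parameter names are
 * hypothetical; pa_bytes_to_usec() is the public conversion helper from
 * pulse/sample.h.
 */
static void example_declare_latency_range(pa_sink *s, size_t min_hwbuf, size_t max_hwbuf) {
    pa_usec_t min_latency, max_latency;

    min_latency = pa_bytes_to_usec(min_hwbuf, &s->sample_spec);
    max_latency = pa_bytes_to_usec(max_hwbuf, &s->sample_spec);

    /* Out-of-range values are clamped to ABSOLUTE_MIN_LATENCY /
     * ABSOLUTE_MAX_LATENCY by the function above. */
    pa_sink_set_latency_range(s, min_latency, max_latency);
}
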
2073 /* Called from main thread */
2074 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2075 pa_sink_assert_ref(s);
2076 pa_assert(min_latency);
2077 pa_assert(max_latency);
2078
2079 if (PA_SINK_IS_LINKED(s->state)) {
2080 pa_usec_t r[2] = { 0, 0 };
2081
2082 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2083
2084 *min_latency = r[0];
2085 *max_latency = r[1];
2086 } else {
2087 *min_latency = s->thread_info.min_latency;
2088 *max_latency = s->thread_info.max_latency;
2089 }
2090 }
2091
2092 /* Called from IO thread */
2093 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2094 void *state = NULL;
2095
2096 pa_sink_assert_ref(s);
2097
2098 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2099 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2100 pa_assert(min_latency <= max_latency);
2101
2102 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2103 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2104 max_latency == ABSOLUTE_MAX_LATENCY) ||
2105 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2106
2107 s->thread_info.min_latency = min_latency;
2108 s->thread_info.max_latency = max_latency;
2109
2110 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2111 pa_sink_input *i;
2112
2113 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2114 if (i->update_sink_latency_range)
2115 i->update_sink_latency_range(i);
2116 }
2117
2118 pa_sink_invalidate_requested_latency(s);
2119
2120 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2121 }
2122
2123 /* Called from main thread, before the sink is put */
2124 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2125 pa_sink_assert_ref(s);
2126
2127 pa_assert(pa_sink_get_state(s) == PA_SINK_INIT);
2128
2129 if (latency < ABSOLUTE_MIN_LATENCY)
2130 latency = ABSOLUTE_MIN_LATENCY;
2131
2132 if (latency > ABSOLUTE_MAX_LATENCY)
2133 latency = ABSOLUTE_MAX_LATENCY;
2134
2135 s->fixed_latency = latency;
2136 pa_source_set_fixed_latency(s->monitor_source, latency);
2137 }
2138
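/*
 * Illustrative sketch, not part of upstream sink.c: a sink without
 * PA_SINK_DYNAMIC_LATENCY reports one fixed latency instead of a range,
 * again before pa_sink_put(). Here the value is derived from a fragment
 * setup; the function name and parameters are hypothetical.
 */
static void example_declare_fixed_latency(pa_sink *s, size_t frag_size, unsigned nfrags) {

    pa_sink_set_fixed_latency(s, pa_bytes_to_usec((uint64_t) frag_size * nfrags, &s->sample_spec));
}
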
2139 /* Called from main context */
2140 size_t pa_sink_get_max_rewind(pa_sink *s) {
2141 size_t r;
2142 pa_sink_assert_ref(s);
2143
2144 if (!PA_SINK_IS_LINKED(s->state))
2145 return s->thread_info.max_rewind;
2146
2147 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2148
2149 return r;
2150 }
2151
2152 /* Called from main context */
2153 size_t pa_sink_get_max_request(pa_sink *s) {
2154 size_t r;
2155 pa_sink_assert_ref(s);
2156
2157 if (!PA_SINK_IS_LINKED(s->state))
2158 return s->thread_info.max_request;
2159
2160 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2161
2162 return r;
2163 }
2164
2165 /* Called from main context. Fills in PA_PROP_DEVICE_ICON_NAME, derived from form factor, device class, profile and bus, unless it is set already */
2166 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2167 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2168
2169 pa_assert(p);
2170
2171 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2172 return TRUE;
2173
2174 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2175
2176 if (pa_streq(ff, "microphone"))
2177 t = "audio-input-microphone";
2178 else if (pa_streq(ff, "webcam"))
2179 t = "camera-web";
2180 else if (pa_streq(ff, "computer"))
2181 t = "computer";
2182 else if (pa_streq(ff, "handset"))
2183 t = "phone";
2184 else if (pa_streq(ff, "portable"))
2185 t = "multimedia-player";
2186 else if (pa_streq(ff, "tv"))
2187 t = "video-display";
2188 }
2189
2190 if (!t)
2191 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2192 if (pa_streq(c, "modem"))
2193 t = "modem";
2194
2195 if (!t) {
2196 if (is_sink)
2197 t = "audio-card";
2198 else
2199 t = "audio-input-microphone";
2200 }
2201
2202 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2203 if (strstr(profile, "analog"))
2204 s = "-analog";
2205 else if (strstr(profile, "iec958"))
2206 s = "-iec958";
2207 else if (strstr(profile, "hdmi"))
2208 s = "-hdmi";
2209 }
2210
2211 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2212
2213 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2214
2215 return TRUE;
2216 }
2217
2218 pa_bool_t pa_device_init_description(pa_proplist *p) {
2219 const char *s;
2220 pa_assert(p);
2221
2222 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2223 return TRUE;
2224
2225 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2226 if (pa_streq(s, "internal")) {
2227 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Internal Audio"));
2228 return TRUE;
2229 }
2230
2231 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2232 if (pa_streq(s, "modem")) {
2233 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Modem"));
2234 return TRUE;
2235 }
2236
2237 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME))) {
2238 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, s);
2239 return TRUE;
2240 }
2241
2242 return FALSE;
2243 }
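
/*
 * Illustrative sketch, not part of upstream sink.c: device modules usually
 * fill in what they know about the hardware and then let the two helpers
 * above derive an icon name and a human-readable description from it. The
 * property values shown and the function name are examples only.
 */
static void example_fill_device_proplist(pa_sink_new_data *data) {

    pa_proplist_sets(data->proplist, PA_PROP_DEVICE_FORM_FACTOR, "internal");
    pa_proplist_sets(data->proplist, PA_PROP_DEVICE_BUS, "pci");

    /* Sets PA_PROP_DEVICE_ICON_NAME, e.g. "audio-card-pci", unless the
     * module provided one already. */
    pa_device_init_icon(data->proplist, TRUE);

    /* Falls back to form factor, device class or product name; returns
     * FALSE if nothing suitable was found. */
    if (!pa_device_init_description(data->proplist))
        pa_log_debug("No suitable description found for device.");
}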