1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
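/* Note: MIX_BUFFER_LENGTH is in bytes; the latency limits and the default
 * fixed latency are in microseconds (pa_usec_t). */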
54
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 memset(data, 0, sizeof(*data));
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_done(pa_sink_new_data *data) {
104 pa_assert(data);
105
106 pa_xfree(data->name);
107 pa_proplist_free(data->proplist);
108 }
109
110 /* Called from main context */
111 static void reset_callbacks(pa_sink *s) {
112 pa_assert(s);
113
114 s->set_state = NULL;
115 s->get_volume = NULL;
116 s->set_volume = NULL;
117 s->get_mute = NULL;
118 s->set_mute = NULL;
119 s->request_rewind = NULL;
120 s->update_requested_latency = NULL;
121 }
122
123 /* Called from main context */
124 pa_sink* pa_sink_new(
125 pa_core *core,
126 pa_sink_new_data *data,
127 pa_sink_flags_t flags) {
128
129 pa_sink *s;
130 const char *name;
131 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
132 pa_source_new_data source_data;
133 const char *dn;
134 char *pt;
135
136 pa_assert(core);
137 pa_assert(data);
138 pa_assert(data->name);
139
140 s = pa_msgobject_new(pa_sink);
141
142 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
143 pa_xfree(s);
144 return NULL;
145 }
146
147 pa_sink_new_data_set_name(data, name);
148
149 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
150 pa_xfree(s);
151 pa_namereg_unregister(core, name);
152 return NULL;
153 }
154
155 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
156 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
157
158 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
159
160 if (!data->channel_map_is_set)
161 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
162
163 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
164 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
165
166 if (!data->volume_is_set)
167 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
168
169 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
170 pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);
171
172 if (!data->muted_is_set)
173 data->muted = FALSE;
174
175 if (data->card)
176 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
177
178 pa_device_init_description(data->proplist);
179 pa_device_init_icon(data->proplist, TRUE);
180
181 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
182 pa_xfree(s);
183 pa_namereg_unregister(core, name);
184 return NULL;
185 }
186
187 s->parent.parent.free = sink_free;
188 s->parent.process_msg = pa_sink_process_msg;
189
190 s->core = core;
191 s->state = PA_SINK_INIT;
192 s->flags = flags;
193 s->suspend_cause = 0;
194 s->name = pa_xstrdup(name);
195 s->proplist = pa_proplist_copy(data->proplist);
196 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
197 s->module = data->module;
198 s->card = data->card;
199
200 s->sample_spec = data->sample_spec;
201 s->channel_map = data->channel_map;
202
203 s->inputs = pa_idxset_new(NULL, NULL);
204 s->n_corked = 0;
205
206 s->reference_volume = s->virtual_volume = data->volume;
207 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
208 s->base_volume = PA_VOLUME_NORM;
209 s->n_volume_steps = PA_VOLUME_NORM+1;
210 s->muted = data->muted;
211 s->refresh_volume = s->refresh_muted = FALSE;
212
213 s->fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
214
215 reset_callbacks(s);
216 s->userdata = NULL;
217
218 s->asyncmsgq = NULL;
219 s->rtpoll = NULL;
220
221 pa_silence_memchunk_get(
222 &core->silence_cache,
223 core->mempool,
224 &s->silence,
225 &s->sample_spec,
226 0);
227
228 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
229 s->thread_info.soft_volume = s->soft_volume;
230 s->thread_info.soft_muted = s->muted;
231 s->thread_info.state = s->state;
232 s->thread_info.rewind_nbytes = 0;
233 s->thread_info.rewind_requested = FALSE;
234 s->thread_info.max_rewind = 0;
235 s->thread_info.max_request = 0;
236 s->thread_info.requested_latency_valid = FALSE;
237 s->thread_info.requested_latency = 0;
238 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
239 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
240
241 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
242
243 if (s->card)
244 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
245
246 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
247 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
248 s->index,
249 s->name,
250 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
251 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
252 pt);
253 pa_xfree(pt);
254
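/* Every sink gets a monitor source that exposes what the sink is playing
 * back; it shares the sink's sample spec and channel map and mirrors the
 * sink's latency-related flags. */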
255 pa_source_new_data_init(&source_data);
256 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
257 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
258 source_data.name = pa_sprintf_malloc("%s.monitor", name);
259 source_data.driver = data->driver;
260 source_data.module = data->module;
261 source_data.card = data->card;
262
263 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
264 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
265 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
266
267 s->monitor_source = pa_source_new(core, &source_data,
268 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
269 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
270
271 pa_source_new_data_done(&source_data);
272
273 if (!s->monitor_source) {
274 pa_sink_unlink(s);
275 pa_sink_unref(s);
276 return NULL;
277 }
278
279 s->monitor_source->monitor_of = s;
280
281 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
282 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
283
284 return s;
285 }
286
287 /* Called from main context */
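/* Transition the sink to a new state: the driver's set_state() callback is
 * tried first, then the IO thread is told via PA_SINK_MESSAGE_SET_STATE. If
 * the IO thread refuses, the driver callback is rolled back to the original
 * state. On suspend/resume all inputs and the monitor source are notified. */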
288 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
289 int ret;
290 pa_bool_t suspend_change;
291 pa_sink_state_t original_state;
292
293 pa_assert(s);
294
295 if (s->state == state)
296 return 0;
297
298 original_state = s->state;
299
300 suspend_change =
301 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
302 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
303
304 if (s->set_state)
305 if ((ret = s->set_state(s, state)) < 0)
306 return ret;
307
308 if (s->asyncmsgq)
309 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
310
311 if (s->set_state)
312 s->set_state(s, original_state);
313
314 return ret;
315 }
316
317 s->state = state;
318
319 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
320 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
321 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
322 }
323
324 if (suspend_change) {
325 pa_sink_input *i;
326 uint32_t idx;
327
328 /* We're suspending or resuming, tell everyone about it */
329
330 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx)))
331 if (s->state == PA_SINK_SUSPENDED &&
332 (i->flags & PA_SINK_INPUT_FAIL_ON_SUSPEND))
333 pa_sink_input_kill(i);
334 else if (i->suspend)
335 i->suspend(i, state == PA_SINK_SUSPENDED);
336
337 if (s->monitor_source)
338 pa_source_sync_suspend(s->monitor_source);
339 }
340
341 return 0;
342 }
343
344 /* Called from main context */
345 void pa_sink_put(pa_sink* s) {
346 pa_sink_assert_ref(s);
347
348 pa_assert(s->state == PA_SINK_INIT);
349
350 /* The following fields must be initialized properly when calling _put() */
351 pa_assert(s->asyncmsgq);
352 pa_assert(s->rtpoll);
353 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
354
355 /* Generally, flags should be initialized via pa_sink_new(). As a
356 * special exception we allow volume related flags to be set
357 * between _new() and _put(). */
358
359 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
360 s->flags |= PA_SINK_DECIBEL_VOLUME;
361
362 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
363 s->flags |= PA_SINK_FLAT_VOLUME;
364
365 s->thread_info.soft_volume = s->soft_volume;
366 s->thread_info.soft_muted = s->muted;
367
368 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
369 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
370 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->fixed_latency != 0));
371 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
372 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
373
374 pa_assert(s->monitor_source->fixed_latency == s->fixed_latency);
375 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
376 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
377
378 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
379
380 pa_source_put(s->monitor_source);
381
382 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
383 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
384 }
385
386 /* Called from main context */
387 void pa_sink_unlink(pa_sink* s) {
388 pa_bool_t linked;
389 pa_sink_input *i, *j = NULL;
390
391 pa_assert(s);
392
393 /* Please note that pa_sink_unlink() does more than simply
394 * reversing pa_sink_put(). It also undoes the registrations
395 * already done in pa_sink_new()! */
396
397 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
398 * may be called multiple times on the same sink without bad
399 * effects. */
400
401 linked = PA_SINK_IS_LINKED(s->state);
402
403 if (linked)
404 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
405
406 if (s->state != PA_SINK_UNLINKED)
407 pa_namereg_unregister(s->core, s->name);
408 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
409
410 if (s->card)
411 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
412
413 while ((i = pa_idxset_first(s->inputs, NULL))) {
414 pa_assert(i != j);
415 pa_sink_input_kill(i);
416 j = i;
417 }
418
419 if (linked)
420 sink_set_state(s, PA_SINK_UNLINKED);
421 else
422 s->state = PA_SINK_UNLINKED;
423
424 reset_callbacks(s);
425
426 if (s->monitor_source)
427 pa_source_unlink(s->monitor_source);
428
429 if (linked) {
430 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
431 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
432 }
433 }
434
435 /* Called from main context */
436 static void sink_free(pa_object *o) {
437 pa_sink *s = PA_SINK(o);
438 pa_sink_input *i;
439
440 pa_assert(s);
441 pa_assert(pa_sink_refcnt(s) == 0);
442
443 if (PA_SINK_IS_LINKED(s->state))
444 pa_sink_unlink(s);
445
446 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
447
448 if (s->monitor_source) {
449 pa_source_unref(s->monitor_source);
450 s->monitor_source = NULL;
451 }
452
453 pa_idxset_free(s->inputs, NULL, NULL);
454
455 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
456 pa_sink_input_unref(i);
457
458 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
459
460 if (s->silence.memblock)
461 pa_memblock_unref(s->silence.memblock);
462
463 pa_xfree(s->name);
464 pa_xfree(s->driver);
465
466 if (s->proplist)
467 pa_proplist_free(s->proplist);
468
469 pa_xfree(s);
470 }
471
472 /* Called from main context */
473 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
474 pa_sink_assert_ref(s);
475
476 s->asyncmsgq = q;
477
478 if (s->monitor_source)
479 pa_source_set_asyncmsgq(s->monitor_source, q);
480 }
481
482 /* Called from main context */
483 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
484 pa_sink_assert_ref(s);
485
486 s->rtpoll = p;
487 if (s->monitor_source)
488 pa_source_set_rtpoll(s->monitor_source, p);
489 }
490
491 /* Called from main context */
492 int pa_sink_update_status(pa_sink*s) {
493 pa_sink_assert_ref(s);
494 pa_assert(PA_SINK_IS_LINKED(s->state));
495
496 if (s->state == PA_SINK_SUSPENDED)
497 return 0;
498
499 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
500 }
501
502 /* Called from main context */
503 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
504 pa_sink_assert_ref(s);
505 pa_assert(PA_SINK_IS_LINKED(s->state));
506 pa_assert(cause != 0);
507
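/* Keep the monitor source's suspend cause in sync with the sink's so the
 * monitor inherits the same reasons for being suspended or resumed. */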
508 if (suspend) {
509 s->suspend_cause |= cause;
510 s->monitor_source->suspend_cause |= cause;
511 } else {
512 s->suspend_cause &= ~cause;
513 s->monitor_source->suspend_cause &= ~cause;
514 }
515
516 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
517 return 0;
518
519 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
520
521 if (s->suspend_cause)
522 return sink_set_state(s, PA_SINK_SUSPENDED);
523 else
524 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
525 }
526
527 /* Called from main context */
528 pa_queue *pa_sink_move_all_start(pa_sink *s) {
529 pa_queue *q;
530 pa_sink_input *i, *n;
531 uint32_t idx;
532
533 pa_sink_assert_ref(s);
534 pa_assert(PA_SINK_IS_LINKED(s->state));
535
536 q = pa_queue_new();
537
538 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
539 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
540
541 pa_sink_input_ref(i);
542
543 if (pa_sink_input_start_move(i) >= 0)
544 pa_queue_push(q, i);
545 else
546 pa_sink_input_unref(i);
547 }
548
549 return q;
550 }
551
552 /* Called from main context */
553 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
554 pa_sink_input *i;
555
556 pa_sink_assert_ref(s);
557 pa_assert(PA_SINK_IS_LINKED(s->state));
558 pa_assert(q);
559
560 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
561 if (pa_sink_input_finish_move(i, s, save) < 0)
562 pa_sink_input_kill(i);
563
564 pa_sink_input_unref(i);
565 }
566
567 pa_queue_free(q, NULL, NULL);
568 }
569
570 /* Called from main context */
571 void pa_sink_move_all_fail(pa_queue *q) {
572 pa_sink_input *i;
573 pa_assert(q);
574
575 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
576 if (pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_MOVE_FAIL], i) == PA_HOOK_OK) {
577 pa_sink_input_kill(i);
578 pa_sink_input_unref(i);
579 }
580 }
581
582 pa_queue_free(q, NULL, NULL);
583 }
584
585 /* Called from IO thread context */
586 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
587 pa_sink_input *i;
588 void *state = NULL;
589 pa_sink_assert_ref(s);
590 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
591
592 /* If nobody requested this and this is not actually a real rewind,
593 * then we can short-cut this */
594 if (!s->thread_info.rewind_requested && nbytes <= 0)
595 return;
596
597 s->thread_info.rewind_nbytes = 0;
598 s->thread_info.rewind_requested = FALSE;
599
600 if (s->thread_info.state == PA_SINK_SUSPENDED)
601 return;
602
603 if (nbytes > 0)
604 pa_log_debug("Processing rewind...");
605
606 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
607 pa_sink_input_assert_ref(i);
608 pa_sink_input_process_rewind(i, nbytes);
609 }
610
611 if (nbytes > 0)
612 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
613 pa_source_process_rewind(s->monitor_source, nbytes);
614 }
615
616 /* Called from IO thread context */
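/* Peek one chunk from each connected input and collect up to maxinfo entries
 * in info[]; chunks that are pure silence are skipped. On return *length has
 * been shrunk to the shortest chunk seen so that all entries can be mixed
 * over the same number of bytes. */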
617 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
618 pa_sink_input *i;
619 unsigned n = 0;
620 void *state = NULL;
621 size_t mixlength = *length;
622
623 pa_sink_assert_ref(s);
624 pa_assert(info);
625
626 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
627 pa_sink_input_assert_ref(i);
628
629 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
630
631 if (mixlength == 0 || info->chunk.length < mixlength)
632 mixlength = info->chunk.length;
633
634 if (pa_memblock_is_silence(info->chunk.memblock)) {
635 pa_memblock_unref(info->chunk.memblock);
636 continue;
637 }
638
639 info->userdata = pa_sink_input_ref(i);
640
641 pa_assert(info->chunk.memblock);
642 pa_assert(info->chunk.length > 0);
643
644 info++;
645 n++;
646 maxinfo--;
647 }
648
649 if (mixlength > 0)
650 *length = mixlength;
651
652 return n;
653 }
654
655 /* Called from IO thread context */
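/* Post-mix bookkeeping: advance every input by the number of bytes consumed,
 * feed direct outputs on the monitor source with their input's own data (or
 * silence), drop the references taken in fill_mix_info() and finally post the
 * mixed result to the monitor source. */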
656 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
657 pa_sink_input *i;
658 void *state = NULL;
659 unsigned p = 0;
660 unsigned n_unreffed = 0;
661
662 pa_sink_assert_ref(s);
663 pa_assert(result);
664 pa_assert(result->memblock);
665 pa_assert(result->length > 0);
666
667 /* We optimize for the case where the order of the inputs has not changed */
668
669 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
670 unsigned j;
671 pa_mix_info* m = NULL;
672
673 pa_sink_input_assert_ref(i);
674
675 /* Let's try to find the matching entry in the pa_mix_info array */
676 for (j = 0; j < n; j ++) {
677
678 if (info[p].userdata == i) {
679 m = info + p;
680 break;
681 }
682
683 p++;
684 if (p >= n)
685 p = 0;
686 }
687
688 /* Drop read data */
689 pa_sink_input_drop(i, result->length);
690
691 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
692
693 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
694 void *ostate = NULL;
695 pa_source_output *o;
696 pa_memchunk c;
697
698 if (m && m->chunk.memblock) {
699 c = m->chunk;
700 pa_memblock_ref(c.memblock);
701 pa_assert(result->length <= c.length);
702 c.length = result->length;
703
704 pa_memchunk_make_writable(&c, 0);
705 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
706 } else {
707 c = s->silence;
708 pa_memblock_ref(c.memblock);
709 pa_assert(result->length <= c.length);
710 c.length = result->length;
711 }
712
713 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
714 pa_source_output_assert_ref(o);
715 pa_assert(o->direct_on_input == i);
716 pa_source_post_direct(s->monitor_source, o, &c);
717 }
718
719 pa_memblock_unref(c.memblock);
720 }
721 }
722
723 if (m) {
724 if (m->chunk.memblock)
725 pa_memblock_unref(m->chunk.memblock);
726 pa_memchunk_reset(&m->chunk);
727
728 pa_sink_input_unref(m->userdata);
729 m->userdata = NULL;
730
731 n_unreffed += 1;
732 }
733 }
734
735 /* Now drop references to entries that are included in the
736 * pa_mix_info array but don't exist anymore */
737
738 if (n_unreffed < n) {
739 for (; n > 0; info++, n--) {
740 if (info->userdata)
741 pa_sink_input_unref(info->userdata);
742 if (info->chunk.memblock)
743 pa_memblock_unref(info->chunk.memblock);
744 }
745 }
746
747 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
748 pa_source_post(s->monitor_source, result);
749 }
750
751 /* Called from IO thread context */
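/* Render up to 'length' bytes into *result. With no inputs the cached
 * silence block is returned, with exactly one input its chunk is referenced
 * directly (applying soft volume/mute only when necessary), and with several
 * inputs everything is mixed into a newly allocated memblock via pa_mix(). */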
752 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
753 pa_mix_info info[MAX_MIX_CHANNELS];
754 unsigned n;
755 size_t block_size_max;
756
757 pa_sink_assert_ref(s);
758 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
759 pa_assert(pa_frame_aligned(length, &s->sample_spec));
760 pa_assert(result);
761
762 pa_sink_ref(s);
763
764 pa_assert(!s->thread_info.rewind_requested);
765 pa_assert(s->thread_info.rewind_nbytes == 0);
766
767 if (s->thread_info.state == PA_SINK_SUSPENDED) {
768 result->memblock = pa_memblock_ref(s->silence.memblock);
769 result->index = s->silence.index;
770 result->length = PA_MIN(s->silence.length, length);
pa_sink_unref(s); /* drop the reference taken above before the early return */
771 return;
772 }
773
774 if (length <= 0)
775 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
776
777 block_size_max = pa_mempool_block_size_max(s->core->mempool);
778 if (length > block_size_max)
779 length = pa_frame_align(block_size_max, &s->sample_spec);
780
781 pa_assert(length > 0);
782
783 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
784
785 if (n == 0) {
786
787 *result = s->silence;
788 pa_memblock_ref(result->memblock);
789
790 if (result->length > length)
791 result->length = length;
792
793 } else if (n == 1) {
794 pa_cvolume volume;
795
796 *result = info[0].chunk;
797 pa_memblock_ref(result->memblock);
798
799 if (result->length > length)
800 result->length = length;
801
802 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
803
804 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
805 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
806 pa_memblock_unref(result->memblock);
807 pa_silence_memchunk_get(&s->core->silence_cache,
808 s->core->mempool,
809 result,
810 &s->sample_spec,
811 result->length);
812 } else {
813 pa_memchunk_make_writable(result, 0);
814 pa_volume_memchunk(result, &s->sample_spec, &volume);
815 }
816 }
817 } else {
818 void *ptr;
819 result->memblock = pa_memblock_new(s->core->mempool, length);
820
821 ptr = pa_memblock_acquire(result->memblock);
822 result->length = pa_mix(info, n,
823 ptr, length,
824 &s->sample_spec,
825 &s->thread_info.soft_volume,
826 s->thread_info.soft_muted);
827 pa_memblock_release(result->memblock);
828
829 result->index = 0;
830 }
831
832 inputs_drop(s, info, n, result);
833
834 pa_sink_unref(s);
835 }
836
837 /* Called from IO thread context */
838 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
839 pa_mix_info info[MAX_MIX_CHANNELS];
840 unsigned n;
841 size_t length, block_size_max;
842
843 pa_sink_assert_ref(s);
844 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
845 pa_assert(target);
846 pa_assert(target->memblock);
847 pa_assert(target->length > 0);
848 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
849
850 pa_sink_ref(s);
851
852 pa_assert(!s->thread_info.rewind_requested);
853 pa_assert(s->thread_info.rewind_nbytes == 0);
854
855 if (s->thread_info.state == PA_SINK_SUSPENDED) {
856 pa_silence_memchunk(target, &s->sample_spec);
pa_sink_unref(s); /* drop the reference taken above before the early return */
857 return;
858 }
859
860 length = target->length;
861 block_size_max = pa_mempool_block_size_max(s->core->mempool);
862 if (length > block_size_max)
863 length = pa_frame_align(block_size_max, &s->sample_spec);
864
865 pa_assert(length > 0);
866
867 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
868
869 if (n == 0) {
870 if (target->length > length)
871 target->length = length;
872
873 pa_silence_memchunk(target, &s->sample_spec);
874 } else if (n == 1) {
875 pa_cvolume volume;
876
877 if (target->length > length)
878 target->length = length;
879
880 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
881
882 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
883 pa_silence_memchunk(target, &s->sample_spec);
884 else {
885 pa_memchunk vchunk;
886
887 vchunk = info[0].chunk;
888 pa_memblock_ref(vchunk.memblock);
889
890 if (vchunk.length > length)
891 vchunk.length = length;
892
893 if (!pa_cvolume_is_norm(&volume)) {
894 pa_memchunk_make_writable(&vchunk, 0);
895 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
896 }
897
898 pa_memchunk_memcpy(target, &vchunk);
899 pa_memblock_unref(vchunk.memblock);
900 }
901
902 } else {
903 void *ptr;
904
905 ptr = pa_memblock_acquire(target->memblock);
906
907 target->length = pa_mix(info, n,
908 (uint8_t*) ptr + target->index, length,
909 &s->sample_spec,
910 &s->thread_info.soft_volume,
911 s->thread_info.soft_muted);
912
913 pa_memblock_release(target->memblock);
914 }
915
916 inputs_drop(s, info, n, target);
917
918 pa_sink_unref(s);
919 }
920
921 /* Called from IO thread context */
922 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
923 pa_memchunk chunk;
924 size_t l, d;
925
926 pa_sink_assert_ref(s);
927 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
928 pa_assert(target);
929 pa_assert(target->memblock);
930 pa_assert(target->length > 0);
931 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
932
933 pa_sink_ref(s);
934
935 pa_assert(!s->thread_info.rewind_requested);
936 pa_assert(s->thread_info.rewind_nbytes == 0);
937
938 l = target->length;
939 d = 0;
940 while (l > 0) {
941 chunk = *target;
942 chunk.index += d;
943 chunk.length -= d;
944
945 pa_sink_render_into(s, &chunk);
946
947 d += chunk.length;
948 l -= chunk.length;
949 }
950
951 pa_sink_unref(s);
952 }
953
954 /* Called from IO thread context */
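/* Like pa_sink_render(), but guarantees that exactly 'length' bytes are
 * produced: whatever the first mixing pass falls short of is rendered into
 * the remainder of the buffer afterwards. */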
955 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
956 pa_mix_info info[MAX_MIX_CHANNELS];
957 size_t length1st = length;
958 unsigned n;
959
960 pa_sink_assert_ref(s);
961 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
962 pa_assert(length > 0);
963 pa_assert(pa_frame_aligned(length, &s->sample_spec));
964 pa_assert(result);
965
966 pa_sink_ref(s);
967
968 pa_assert(!s->thread_info.rewind_requested);
969 pa_assert(s->thread_info.rewind_nbytes == 0);
970
971 pa_assert(length > 0);
972
973 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
974
975 if (n == 0) {
976 pa_silence_memchunk_get(&s->core->silence_cache,
977 s->core->mempool,
978 result,
979 &s->sample_spec,
980 length1st);
981 } else if (n == 1) {
982 pa_cvolume volume;
983
984 *result = info[0].chunk;
985 pa_memblock_ref(result->memblock);
986
987 if (result->length > length)
988 result->length = length;
989
990 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
991
992 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
993 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
994 pa_memblock_unref(result->memblock);
995 pa_silence_memchunk_get(&s->core->silence_cache,
996 s->core->mempool,
997 result,
998 &s->sample_spec,
999 result->length);
1000 } else {
1001 pa_memchunk_make_writable(result, length);
1002 pa_volume_memchunk(result, &s->sample_spec, &volume);
1003 }
1004 }
1005 } else {
1006 void *ptr;
1007
1008 result->index = 0;
1009 result->memblock = pa_memblock_new(s->core->mempool, length);
1010
1011 ptr = pa_memblock_acquire(result->memblock);
1012
1013 result->length = pa_mix(info, n,
1014 (uint8_t*) ptr + result->index, length1st,
1015 &s->sample_spec,
1016 &s->thread_info.soft_volume,
1017 s->thread_info.soft_muted);
1018
1019 pa_memblock_release(result->memblock);
1020 }
1021
1022 inputs_drop(s, info, n, result);
1023
1024 if (result->length < length) {
1025 pa_memchunk chunk;
1026
1027 /* The mix produced fewer bytes than requested, render the missing tail on top */
1028 pa_memchunk_make_writable(result, length);
1029
1030 chunk.memblock = result->memblock;
1031 chunk.index = result->index + result->length;
1032 chunk.length = length - result->length;
1033
1034 pa_sink_render_into_full(s, &chunk);
1035
1036 result->length = length;
1037 }
1044
1045 pa_sink_unref(s);
1046 }
1047
1048 /* Called from main thread */
1049 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1050 pa_usec_t usec = 0;
1051
1052 pa_sink_assert_ref(s);
1053 pa_assert(PA_SINK_IS_LINKED(s->state));
1054
1055 /* The returned value is supposed to be in the time domain of the sound card! */
1056
1057 if (s->state == PA_SINK_SUSPENDED)
1058 return 0;
1059
1060 if (!(s->flags & PA_SINK_LATENCY))
1061 return 0;
1062
1063 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1064
1065 return usec;
1066 }
1067
1068 /* Called from IO thread */
1069 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1070 pa_usec_t usec = 0;
1071 pa_msgobject *o;
1072
1073 pa_sink_assert_ref(s);
1074 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1075
1076 /* The returned value is supposed to be in the time domain of the sound card! */
1077
1078 if (s->thread_info.state == PA_SINK_SUSPENDED)
1079 return 0;
1080
1081 if (!(s->flags & PA_SINK_LATENCY))
1082 return 0;
1083
1084 o = PA_MSGOBJECT(s);
1085
1086 /* We probably should make this a proper vtable callback instead of going through process_msg() */
1087
1088 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1089 return -1;
1090
1091 return usec;
1092 }
1093
1094 static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
1095 unsigned c;
1096
1097 pa_sink_input_assert_ref(i);
1098 pa_assert(new_volume->channels == i->sample_spec.channels);
1099
1100 /*
1101 * This basically calculates:
1102 *
1103 * i->relative_volume := i->virtual_volume / new_volume
1104 * i->soft_volume := i->relative_volume * i->volume_factor
1105 */
1106
1107 /* The new sink volume passed in here must already be remapped to
1108 * the sink input's channel map! */
1109
1110 i->soft_volume.channels = i->sample_spec.channels;
1111
1112 for (c = 0; c < i->sample_spec.channels; c++)
1113
1114 if (new_volume->values[c] <= PA_VOLUME_MUTED)
1115 /* We leave i->relative_volume untouched */
1116 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1117 else {
1118 i->relative_volume[c] =
1119 pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
1120 pa_sw_volume_to_linear(new_volume->values[c]);
1121
1122 i->soft_volume.values[c] = pa_sw_volume_from_linear(
1123 i->relative_volume[c] *
1124 pa_sw_volume_to_linear(i->volume_factor.values[c]));
1125 }
1126
1127 /* Hooks have the ability to play games with i->soft_volume */
1128 pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);
1129
1130 /* We don't copy the soft_volume to the thread_info data
1131 * here. That must be done by the caller */
1132 }
1133
1134 /* Called from main thread */
1135 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1136 pa_sink_input *i;
1137 uint32_t idx;
1138
1139 pa_sink_assert_ref(s);
1140 pa_assert(new_volume);
1141 pa_assert(PA_SINK_IS_LINKED(s->state));
1142 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1143
1144 /* This is called whenever a sink input volume changes or a sink
1145 * input is added/removed and we might need to fix up the sink
1146 * volume accordingly. Please note that we don't actually update
1147 * the sink's volume here, we only return how it needs to be
1148 * updated. The caller should then call pa_sink_set_volume(). */
1149
1150 if (pa_idxset_isempty(s->inputs)) {
1151 /* In the special case that we have no sink input we leave the
1152 * volume unmodified. */
1153 *new_volume = s->reference_volume;
1154 return;
1155 }
1156
1157 pa_cvolume_mute(new_volume, s->channel_map.channels);
1158
1159 /* First let's determine the new maximum volume of all inputs
1160 * connected to this sink */
1161 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1162 unsigned c;
1163 pa_cvolume remapped_volume;
1164
1165 remapped_volume = i->virtual_volume;
1166 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1167
1168 for (c = 0; c < new_volume->channels; c++)
1169 if (remapped_volume.values[c] > new_volume->values[c])
1170 new_volume->values[c] = remapped_volume.values[c];
1171 }
1172
1173 /* Then, let's update the soft volumes of all inputs connected
1174 * to this sink */
1175 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1176 pa_cvolume remapped_new_volume;
1177
1178 remapped_new_volume = *new_volume;
1179 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1180 compute_new_soft_volume(i, &remapped_new_volume);
1181
1182 /* We don't copy soft_volume to the thread_info data here
1183 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1184 * want the update to be atomic with the sink volume
1185 * update, hence we do it within the pa_sink_set_volume() call
1186 * below */
1187 }
1188 }
1189
1190 /* Called from main thread */
1191 void pa_sink_propagate_flat_volume(pa_sink *s) {
1192 pa_sink_input *i;
1193 uint32_t idx;
1194
1195 pa_sink_assert_ref(s);
1196 pa_assert(PA_SINK_IS_LINKED(s->state));
1197 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1198
1199 /* This is called whenever the sink volume changes in a way that is not
1200 * caused by a sink input volume change. We need to fix up the
1201 * sink input volumes accordingly */
1202
1203 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1204 pa_cvolume sink_volume, new_virtual_volume;
1205 unsigned c;
1206
1207 /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */
1208
1209 sink_volume = s->virtual_volume;
1210 pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);
1211
1212 for (c = 0; c < i->sample_spec.channels; c++)
1213 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1214 i->relative_volume[c] *
1215 pa_sw_volume_to_linear(sink_volume.values[c]));
1216
1217 new_virtual_volume.channels = i->sample_spec.channels;
1218
1219 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1220 i->virtual_volume = new_virtual_volume;
1221
1222 /* Hmm, the soft volume might no longer actually match
1223 * what has been chosen as new virtual volume here,
1224 * especially when the old volume was
1225 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1226 * volumes here. */
1227 compute_new_soft_volume(i, &sink_volume);
1228
1229 /* The virtual volume changed, let's tell people so */
1230 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1231 }
1232 }
1233
1234 /* If the soft_volume of any of the sink inputs got changed, let's
1235 * make sure the thread copies are synced up. */
1236 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
1237 }
1238
1239 /* Called from main thread */
1240 void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference) {
1241 pa_bool_t virtual_volume_changed;
1242
1243 pa_sink_assert_ref(s);
1244 pa_assert(PA_SINK_IS_LINKED(s->state));
1245 pa_assert(volume);
1246 pa_assert(pa_cvolume_valid(volume));
1247 pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));
1248
1249 virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
1250 s->virtual_volume = *volume;
1251
1252 if (become_reference)
1253 s->reference_volume = s->virtual_volume;
1254
1255 /* Propagate this volume change back to the inputs */
1256 if (virtual_volume_changed)
1257 if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
1258 pa_sink_propagate_flat_volume(s);
1259
1260 if (s->set_volume) {
1261 /* If we have a function set_volume(), then we do not apply a
1262 * soft volume by default. However, set_volume() is free to
1263 * apply one to s->soft_volume */
1264
1265 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1266 s->set_volume(s);
1267
1268 } else
1269 /* If we have no function set_volume(), then the soft volume
1270 * becomes the virtual volume */
1271 s->soft_volume = s->virtual_volume;
1272
1273 /* This tells the sink that soft and/or virtual volume changed */
1274 if (sendmsg)
1275 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1276
1277 if (virtual_volume_changed)
1278 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1279 }
1280
1281 /* Called from main thread. Only to be called by sink implementor */
1282 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1283 pa_sink_assert_ref(s);
1284 pa_assert(volume);
1285
1286 s->soft_volume = *volume;
1287
1288 if (PA_SINK_IS_LINKED(s->state))
1289 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1290 else
1291 s->thread_info.soft_volume = *volume;
1292 }
1293
1294 /* Called from main thread */
1295 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
1296 pa_sink_assert_ref(s);
1297
1298 if (s->refresh_volume || force_refresh) {
1299 struct pa_cvolume old_virtual_volume = s->virtual_volume;
1300
1301 if (s->get_volume)
1302 s->get_volume(s);
1303
1304 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1305
1306 if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {
1307
1308 s->reference_volume = s->virtual_volume;
1309
1310 if (s->flags & PA_SINK_FLAT_VOLUME)
1311 pa_sink_propagate_flat_volume(s);
1312
1313 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1314 }
1315 }
1316
1317 return reference ? &s->reference_volume : &s->virtual_volume;
1318 }
1319
1320 /* Called from main thread */
1321 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume) {
1322 pa_sink_assert_ref(s);
1323
1324 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1325
1326 if (pa_cvolume_equal(&s->virtual_volume, new_volume))
1327 return;
1328
1329 s->reference_volume = s->virtual_volume = *new_volume;
1330
1331 if (s->flags & PA_SINK_FLAT_VOLUME)
1332 pa_sink_propagate_flat_volume(s);
1333
1334 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1335 }
1336
1337 /* Called from main thread */
1338 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute) {
1339 pa_bool_t old_muted;
1340
1341 pa_sink_assert_ref(s);
1342 pa_assert(PA_SINK_IS_LINKED(s->state));
1343
1344 old_muted = s->muted;
1345 s->muted = mute;
1346
1347 if (s->set_mute)
1348 s->set_mute(s);
1349
1350 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1351
1352 if (old_muted != s->muted)
1353 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1354 }
1355
1356 /* Called from main thread */
1357 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1358
1359 pa_sink_assert_ref(s);
1360
1361 if (s->refresh_muted || force_refresh) {
1362 pa_bool_t old_muted = s->muted;
1363
1364 if (s->get_mute)
1365 s->get_mute(s);
1366
1367 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1368
1369 if (old_muted != s->muted)
1370 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1371 }
1372
1373 return s->muted;
1374 }
1375
1376 /* Called from main thread */
1377 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1378 pa_sink_assert_ref(s);
1379
1380 /* The sink implementor may call this if the mute status changed to make sure everyone is notified */
1381
1382 if (s->muted == new_muted)
1383 return;
1384
1385 s->muted = new_muted;
1386 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1387 }
1388
1389 /* Called from main thread */
1390 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1391 pa_sink_assert_ref(s);
1392
1393 if (p)
1394 pa_proplist_update(s->proplist, mode, p);
1395
1396 if (PA_SINK_IS_LINKED(s->state)) {
1397 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1398 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1399 }
1400
1401 return TRUE;
1402 }
1403
1404 /* Called from main thread */
1405 void pa_sink_set_description(pa_sink *s, const char *description) {
1406 const char *old;
1407 pa_sink_assert_ref(s);
1408
1409 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1410 return;
1411
1412 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1413
1414 if (old && description && !strcmp(old, description))
1415 return;
1416
1417 if (description)
1418 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1419 else
1420 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1421
1422 if (s->monitor_source) {
1423 char *n;
1424
1425 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1426 pa_source_set_description(s->monitor_source, n);
1427 pa_xfree(n);
1428 }
1429
1430 if (PA_SINK_IS_LINKED(s->state)) {
1431 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1432 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1433 }
1434 }
1435
1436 /* Called from main thread */
1437 unsigned pa_sink_linked_by(pa_sink *s) {
1438 unsigned ret;
1439
1440 pa_sink_assert_ref(s);
1441 pa_assert(PA_SINK_IS_LINKED(s->state));
1442
1443 ret = pa_idxset_size(s->inputs);
1444
1445 /* We add in the number of streams connected to us here. Please
1446 * note the asymmetry to pa_sink_used_by()! */
1447
1448 if (s->monitor_source)
1449 ret += pa_source_linked_by(s->monitor_source);
1450
1451 return ret;
1452 }
1453
1454 /* Called from main thread */
1455 unsigned pa_sink_used_by(pa_sink *s) {
1456 unsigned ret;
1457
1458 pa_sink_assert_ref(s);
1459 pa_assert(PA_SINK_IS_LINKED(s->state));
1460
1461 ret = pa_idxset_size(s->inputs);
1462 pa_assert(ret >= s->n_corked);
1463
1464 /* Streams connected to our monitor source do not matter for
1465 * pa_sink_used_by()! */
1466
1467 return ret - s->n_corked;
1468 }
1469
1470 /* Called from main thread */
1471 unsigned pa_sink_check_suspend(pa_sink *s) {
1472 unsigned ret;
1473 pa_sink_input *i;
1474 uint32_t idx;
1475
1476 pa_sink_assert_ref(s);
1477
1478 if (!PA_SINK_IS_LINKED(s->state))
1479 return 0;
1480
1481 ret = 0;
1482
1483 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1484 pa_sink_input_state_t st;
1485
1486 st = pa_sink_input_get_state(i);
1487 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1488
1489 if (st == PA_SINK_INPUT_CORKED)
1490 continue;
1491
1492 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1493 continue;
1494
1495 ret ++;
1496 }
1497
1498 if (s->monitor_source)
1499 ret += pa_source_check_suspend(s->monitor_source);
1500
1501 return ret;
1502 }
1503
1504 /* Called from the IO thread */
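/* Copy each input's soft volume into its thread_info counterpart and request
 * a rewind so the new volume also applies to data that was already rendered. */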
1505 static void sync_input_volumes_within_thread(pa_sink *s) {
1506 pa_sink_input *i;
1507 void *state = NULL;
1508
1509 pa_sink_assert_ref(s);
1510
1511 while ((i = PA_SINK_INPUT(pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))) {
1512 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1513 continue;
1514
1515 i->thread_info.soft_volume = i->soft_volume;
1516 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1517 }
1518 }
1519
1520 /* Called from IO thread, except when it is not */
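/* Central message handler of the sink: deals with inputs being added,
 * removed or moved, volume/mute synchronization, state changes and the
 * various latency and rewind queries sent over the asyncmsgq. */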
1521 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1522 pa_sink *s = PA_SINK(o);
1523 pa_sink_assert_ref(s);
1524
1525 switch ((pa_sink_message_t) code) {
1526
1527 case PA_SINK_MESSAGE_ADD_INPUT: {
1528 pa_sink_input *i = PA_SINK_INPUT(userdata);
1529
1530 /* If you change anything here, make sure to change the
1531 * sink input handling a few lines down at
1532 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1533
1534 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1535
1536 /* Since the caller sleeps in pa_sink_input_put(), we can
1537 * safely access data outside of thread_info even though
1538 * it is mutable */
1539
1540 if ((i->thread_info.sync_prev = i->sync_prev)) {
1541 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1542 pa_assert(i->sync_prev->sync_next == i);
1543 i->thread_info.sync_prev->thread_info.sync_next = i;
1544 }
1545
1546 if ((i->thread_info.sync_next = i->sync_next)) {
1547 pa_assert(i->sink == i->thread_info.sync_next->sink);
1548 pa_assert(i->sync_next->sync_prev == i);
1549 i->thread_info.sync_next->thread_info.sync_prev = i;
1550 }
1551
1552 pa_assert(!i->thread_info.attached);
1553 i->thread_info.attached = TRUE;
1554
1555 if (i->attach)
1556 i->attach(i);
1557
1558 pa_sink_input_set_state_within_thread(i, i->state);
1559
1560 /* The requested latency of the sink input needs to be
1561 * fixed up and then configured on the sink */
1562
1563 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1564 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1565
1566 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1567 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1568
1569 /* We don't rewind here automatically. This is left to the
1570 * sink input implementor because some sink inputs need a
1571 * slow start, i.e. need some time to buffer client
1572 * samples before beginning streaming. */
1573
1574 /* In flat volume mode we need to update the volume as
1575 * well */
1576 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1577 }
1578
1579 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1580 pa_sink_input *i = PA_SINK_INPUT(userdata);
1581
1582 /* If you change anything here, make sure to change the
1583 * sink input handling a few lines down at
1584 * PA_SINK_MESSAGE_START_MOVE, too. */
1585
1586 if (i->detach)
1587 i->detach(i);
1588
1589 pa_sink_input_set_state_within_thread(i, i->state);
1590
1591 pa_assert(i->thread_info.attached);
1592 i->thread_info.attached = FALSE;
1593
1594 /* Since the caller sleeps in pa_sink_input_unlink(),
1595 * we can safely access data outside of thread_info even
1596 * though it is mutable */
1597
1598 pa_assert(!i->sync_prev);
1599 pa_assert(!i->sync_next);
1600
1601 if (i->thread_info.sync_prev) {
1602 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1603 i->thread_info.sync_prev = NULL;
1604 }
1605
1606 if (i->thread_info.sync_next) {
1607 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1608 i->thread_info.sync_next = NULL;
1609 }
1610
1611 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1612 pa_sink_input_unref(i);
1613
1614 pa_sink_invalidate_requested_latency(s);
1615 pa_sink_request_rewind(s, (size_t) -1);
1616
1617 /* In flat volume mode we need to update the volume as
1618 * well */
1619 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1620 }
1621
1622 case PA_SINK_MESSAGE_START_MOVE: {
1623 pa_sink_input *i = PA_SINK_INPUT(userdata);
1624
1625 /* We don't support moving synchronized streams. */
1626 pa_assert(!i->sync_prev);
1627 pa_assert(!i->sync_next);
1628 pa_assert(!i->thread_info.sync_next);
1629 pa_assert(!i->thread_info.sync_prev);
1630
1631 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1632 pa_usec_t usec = 0;
1633 size_t sink_nbytes, total_nbytes;
1634
1635 /* Get the latency of the sink */
1636 if (!(s->flags & PA_SINK_LATENCY) ||
1637 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1638 usec = 0;
1639
1640 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1641 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1642
1643 if (total_nbytes > 0) {
1644 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1645 i->thread_info.rewrite_flush = TRUE;
1646 pa_sink_input_process_rewind(i, sink_nbytes);
1647 }
1648 }
1649
1650 if (i->detach)
1651 i->detach(i);
1652
1653 pa_assert(i->thread_info.attached);
1654 i->thread_info.attached = FALSE;
1655
1656 /* Let's remove the sink input ...*/
1657 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1658 pa_sink_input_unref(i);
1659
1660 pa_sink_invalidate_requested_latency(s);
1661
1662 pa_log_debug("Requesting rewind due to started move");
1663 pa_sink_request_rewind(s, (size_t) -1);
1664
1665 /* In flat volume mode we need to update the volume as
1666 * well */
1667 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1668 }
1669
1670 case PA_SINK_MESSAGE_FINISH_MOVE: {
1671 pa_sink_input *i = PA_SINK_INPUT(userdata);
1672
1673 /* We don't support moving synchronized streams. */
1674 pa_assert(!i->sync_prev);
1675 pa_assert(!i->sync_next);
1676 pa_assert(!i->thread_info.sync_next);
1677 pa_assert(!i->thread_info.sync_prev);
1678
1679 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1680
1681 pa_assert(!i->thread_info.attached);
1682 i->thread_info.attached = TRUE;
1683
1684 if (i->attach)
1685 i->attach(i);
1686
1687 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1688 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1689
1690 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1691 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1692
1693 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1694 pa_usec_t usec = 0;
1695 size_t nbytes;
1696
1697 /* Get the latency of the sink */
1698 if (!(s->flags & PA_SINK_LATENCY) ||
1699 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1700 usec = 0;
1701
1702 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1703
1704 if (nbytes > 0)
1705 pa_sink_input_drop(i, nbytes);
1706
1707 pa_log_debug("Requesting rewind due to finished move");
1708 pa_sink_request_rewind(s, nbytes);
1709 }
1710
1711 /* In flat volume mode we need to update the volume as
1712 * well */
1713 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1714 }
1715
1716 case PA_SINK_MESSAGE_SET_VOLUME:
1717
1718 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1719 s->thread_info.soft_volume = s->soft_volume;
1720 pa_sink_request_rewind(s, (size_t) -1);
1721 }
1722
1723 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1724 return 0;
1725
1726 /* Fall through ... */
1727
1728 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1729 sync_input_volumes_within_thread(s);
1730 return 0;
1731
1732 case PA_SINK_MESSAGE_GET_VOLUME:
1733 return 0;
1734
1735 case PA_SINK_MESSAGE_SET_MUTE:
1736
1737 if (s->thread_info.soft_muted != s->muted) {
1738 s->thread_info.soft_muted = s->muted;
1739 pa_sink_request_rewind(s, (size_t) -1);
1740 }
1741
1742 return 0;
1743
1744 case PA_SINK_MESSAGE_GET_MUTE:
1745 return 0;
1746
1747 case PA_SINK_MESSAGE_SET_STATE: {
1748
1749 pa_bool_t suspend_change =
1750 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1751 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1752
1753 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1754
1755 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1756 s->thread_info.rewind_nbytes = 0;
1757 s->thread_info.rewind_requested = FALSE;
1758 }
1759
1760 if (suspend_change) {
1761 pa_sink_input *i;
1762 void *state = NULL;
1763
1764 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1765 if (i->suspend_within_thread)
1766 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1767 }
1768
1769 return 0;
1770 }
1771
1772 case PA_SINK_MESSAGE_DETACH:
1773
1774 /* Detach all streams */
1775 pa_sink_detach_within_thread(s);
1776 return 0;
1777
1778 case PA_SINK_MESSAGE_ATTACH:
1779
1780 /* Reattach all streams */
1781 pa_sink_attach_within_thread(s);
1782 return 0;
1783
1784 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
1785
1786 pa_usec_t *usec = userdata;
1787 *usec = pa_sink_get_requested_latency_within_thread(s);
1788
1789 if (*usec == (pa_usec_t) -1)
1790 *usec = s->thread_info.max_latency;
1791
1792 return 0;
1793 }
1794
1795 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
1796 pa_usec_t *r = userdata;
1797
1798 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
1799
1800 return 0;
1801 }
1802
1803 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
1804 pa_usec_t *r = userdata;
1805
1806 r[0] = s->thread_info.min_latency;
1807 r[1] = s->thread_info.max_latency;
1808
1809 return 0;
1810 }
1811
1812 case PA_SINK_MESSAGE_GET_MAX_REWIND:
1813
1814 *((size_t*) userdata) = s->thread_info.max_rewind;
1815 return 0;
1816
1817 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
1818
1819 *((size_t*) userdata) = s->thread_info.max_request;
1820 return 0;
1821
1822 case PA_SINK_MESSAGE_SET_MAX_REWIND:
1823
1824 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
1825 return 0;
1826
1827 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
1828
1829 pa_sink_set_max_request_within_thread(s, (size_t) offset);
1830 return 0;
1831
1832 case PA_SINK_MESSAGE_GET_LATENCY:
1833 case PA_SINK_MESSAGE_MAX:
1834 ;
1835 }
1836
1837 return -1;
1838 }
1839
1840 /* Called from main thread */
1841 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
1842 pa_sink *sink;
1843 uint32_t idx;
1844 int ret = 0;
1845
1846 pa_core_assert_ref(c);
1847 pa_assert(cause != 0);
1848
1849 for (sink = PA_SINK(pa_idxset_first(c->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(c->sinks, &idx))) {
1850 int r;
1851
1852 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
1853 ret = r;
1854 }
1855
1856 return ret;
1857 }
1858
1859 /* Called from main thread */
1860 void pa_sink_detach(pa_sink *s) {
1861 pa_sink_assert_ref(s);
1862 pa_assert(PA_SINK_IS_LINKED(s->state));
1863
1864 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
1865 }
1866
1867 /* Called from main thread */
1868 void pa_sink_attach(pa_sink *s) {
1869 pa_sink_assert_ref(s);
1870 pa_assert(PA_SINK_IS_LINKED(s->state));
1871
1872 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
1873 }
1874
1875 /* Called from IO thread */
1876 void pa_sink_detach_within_thread(pa_sink *s) {
1877 pa_sink_input *i;
1878 void *state = NULL;
1879
1880 pa_sink_assert_ref(s);
1881 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1882
1883 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1884 if (i->detach)
1885 i->detach(i);
1886
1887 if (s->monitor_source)
1888 pa_source_detach_within_thread(s->monitor_source);
1889 }
1890
1891 /* Called from IO thread */
1892 void pa_sink_attach_within_thread(pa_sink *s) {
1893 pa_sink_input *i;
1894 void *state = NULL;
1895
1896 pa_sink_assert_ref(s);
1897 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1898
1899 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1900 if (i->attach)
1901 i->attach(i);
1902
1903 if (s->monitor_source)
1904 pa_source_attach_within_thread(s->monitor_source);
1905 }
1906
1907 /* Called from IO thread */
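/* Asks the sink to rewrite already-rendered data. A nbytes value of
 * (size_t) -1 means "as much as possible". The request is clamped to
 * max_rewind, dropped if a larger rewind is already pending, and
 * ignored entirely while the sink is suspended. */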
1908 void pa_sink_request_rewind(pa_sink *s, size_t nbytes) {
1909 pa_sink_assert_ref(s);
1910 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1911
1912 if (s->thread_info.state == PA_SINK_SUSPENDED)
1913 return;
1914
1915 if (nbytes == (size_t) -1)
1916 nbytes = s->thread_info.max_rewind;
1917
1918 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
1919
1920 if (s->thread_info.rewind_requested &&
1921 nbytes <= s->thread_info.rewind_nbytes)
1922 return;
1923
1924 s->thread_info.rewind_nbytes = nbytes;
1925 s->thread_info.rewind_requested = TRUE;
1926
1927 if (s->request_rewind)
1928 s->request_rewind(s);
1929 }
1930
1931 /* Called from IO thread */
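/* Computes the effective latency to configure: the smallest latency
 * requested by any connected input or by the monitor source, clamped
 * to the sink's latency range. Returns (pa_usec_t) -1 if nobody
 * requested anything. For sinks without dynamic latency the (clamped)
 * fixed latency is returned instead. The result is cached once the
 * sink is linked. */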
1932 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
1933 pa_usec_t result = (pa_usec_t) -1;
1934 pa_sink_input *i;
1935 void *state = NULL;
1936 pa_usec_t monitor_latency;
1937
1938 pa_sink_assert_ref(s);
1939
1940 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
1941 return PA_CLAMP(s->fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
1942
1943 if (s->thread_info.requested_latency_valid)
1944 return s->thread_info.requested_latency;
1945
1946 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1947
1948 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
1949 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
1950 result = i->thread_info.requested_sink_latency;
1951
1952 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
1953
1954 if (monitor_latency != (pa_usec_t) -1 &&
1955 (result == (pa_usec_t) -1 || result > monitor_latency))
1956 result = monitor_latency;
1957
1958 if (result != (pa_usec_t) -1)
1959 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
1960
1961 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1962 /* Only cache if properly initialized */
1963 s->thread_info.requested_latency = result;
1964 s->thread_info.requested_latency_valid = TRUE;
1965 }
1966
1967 return result;
1968 }
1969
1970 /* Called from main thread */
1971 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
1972 pa_usec_t usec = 0;
1973
1974 pa_sink_assert_ref(s);
1975 pa_assert(PA_SINK_IS_LINKED(s->state));
1976
1977 if (s->state == PA_SINK_SUSPENDED)
1978 return 0;
1979
1980 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
1981 return usec;
1982 }
1983
1984 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
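/* Updates the maximum rewind value and propagates it to every
 * attached input as well as to the monitor source. */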
1985 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
1986 pa_sink_input *i;
1987 void *state = NULL;
1988
1989 pa_sink_assert_ref(s);
1990
1991 if (max_rewind == s->thread_info.max_rewind)
1992 return;
1993
1994 s->thread_info.max_rewind = max_rewind;
1995
1996 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1997 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1998 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1999 }
2000
2001 if (s->monitor_source)
2002 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2003 }
2004
2005 /* Called from main thread */
2006 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2007 pa_sink_assert_ref(s);
2008
2009 if (PA_SINK_IS_LINKED(s->state))
2010 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2011 else
2012 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2013 }
2014
2015 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
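/* Updates the maximum request size and, if the sink is linked,
 * propagates it to every attached input. */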
2016 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2017 void *state = NULL;
2018
2019 pa_sink_assert_ref(s);
2020
2021 if (max_request == s->thread_info.max_request)
2022 return;
2023
2024 s->thread_info.max_request = max_request;
2025
2026 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2027 pa_sink_input *i;
2028
2029 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2030 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2031 }
2032 }
2033
2034 /* Called from main thread */
2035 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2036 pa_sink_assert_ref(s);
2037
2038 if (PA_SINK_IS_LINKED(s->state))
2039 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2040 else
2041 pa_sink_set_max_request_within_thread(s, max_request);
2042 }
2043
2044 /* Called from IO thread */
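/* Drops the cached requested-latency value so that the next query
 * recomputes it, and notifies both the driver and the attached
 * inputs. Only meaningful for sinks with dynamic latency. */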
2045 void pa_sink_invalidate_requested_latency(pa_sink *s) {
2046 pa_sink_input *i;
2047 void *state = NULL;
2048
2049 pa_sink_assert_ref(s);
2050
2051 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2052 return;
2053
2054 s->thread_info.requested_latency_valid = FALSE;
2055
2056 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2057
2058 if (s->update_requested_latency)
2059 s->update_requested_latency(s);
2060
2061 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2062 if (i->update_sink_requested_latency)
2063 i->update_sink_requested_latency(i);
2064 }
2065 }
2066
2067 /* Called from main thread */
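/* Sets the allowed latency range, clamping it to the absolute bounds,
 * and forwards it to the IO thread if the sink is already linked. A
 * driver with dynamic latency support would typically call something
 * like pa_sink_set_latency_range(s, 5*PA_USEC_PER_MSEC, 2*PA_USEC_PER_SEC)
 * during initialization -- these values are purely illustrative. */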
2068 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2069 pa_sink_assert_ref(s);
2070
2071 /* min_latency == 0: no limit requested, fall back to the absolute bound
2072 * min_latency anything else: use the specified limit
2073 *
2074 * The same applies to max_latency */
2075
2076 if (min_latency < ABSOLUTE_MIN_LATENCY)
2077 min_latency = ABSOLUTE_MIN_LATENCY;
2078
2079 if (max_latency == 0 ||
2080 max_latency > ABSOLUTE_MAX_LATENCY)
2081 max_latency = ABSOLUTE_MAX_LATENCY;
2082
2083 pa_assert(min_latency <= max_latency);
2084
2085 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2086 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2087 max_latency == ABSOLUTE_MAX_LATENCY) ||
2088 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2089
2090 if (PA_SINK_IS_LINKED(s->state)) {
2091 pa_usec_t r[2];
2092
2093 r[0] = min_latency;
2094 r[1] = max_latency;
2095
2096 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2097 } else
2098 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2099 }
2100
2101 /* Called from main thread */
2102 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2103 pa_sink_assert_ref(s);
2104 pa_assert(min_latency);
2105 pa_assert(max_latency);
2106
2107 if (PA_SINK_IS_LINKED(s->state)) {
2108 pa_usec_t r[2] = { 0, 0 };
2109
2110 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2111
2112 *min_latency = r[0];
2113 *max_latency = r[1];
2114 } else {
2115 *min_latency = s->thread_info.min_latency;
2116 *max_latency = s->thread_info.max_latency;
2117 }
2118 }
2119
2120 /* Called from IO thread */
2121 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2122 void *state = NULL;
2123
2124 pa_sink_assert_ref(s);
2125
2126 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2127 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2128 pa_assert(min_latency <= max_latency);
2129
2130 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2131 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2132 max_latency == ABSOLUTE_MAX_LATENCY) ||
2133 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2134
2135 s->thread_info.min_latency = min_latency;
2136 s->thread_info.max_latency = max_latency;
2137
2138 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2139 pa_sink_input *i;
2140
2141 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2142 if (i->update_sink_latency_range)
2143 i->update_sink_latency_range(i);
2144 }
2145
2146 pa_sink_invalidate_requested_latency(s);
2147
2148 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2149 }
2150
2151 /* Called from main thread, before the sink is put */
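/* For sinks without dynamic latency: records the single fixed latency
 * (clamped to the absolute bounds) and mirrors it on the monitor
 * source. May only be called while the sink is still in INIT state. */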
2152 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2153 pa_sink_assert_ref(s);
2154
2155 pa_assert(pa_sink_get_state(s) == PA_SINK_INIT);
2156
2157 if (latency < ABSOLUTE_MIN_LATENCY)
2158 latency = ABSOLUTE_MIN_LATENCY;
2159
2160 if (latency > ABSOLUTE_MAX_LATENCY)
2161 latency = ABSOLUTE_MAX_LATENCY;
2162
2163 s->fixed_latency = latency;
2164 pa_source_set_fixed_latency(s->monitor_source, latency);
2165 }
2166
2167 /* Called from main context */
2168 size_t pa_sink_get_max_rewind(pa_sink *s) {
2169 size_t r;
2170 pa_sink_assert_ref(s);
2171
2172 if (!PA_SINK_IS_LINKED(s->state))
2173 return s->thread_info.max_rewind;
2174
2175 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2176
2177 return r;
2178 }
2179
2180 /* Called from main context */
2181 size_t pa_sink_get_max_request(pa_sink *s) {
2182 size_t r;
2183 pa_sink_assert_ref(s);
2184
2185 if (!PA_SINK_IS_LINKED(s->state))
2186 return s->thread_info.max_request;
2187
2188 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2189
2190 return r;
2191 }
2192
2193 /* Called from main context */
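/* Fills in PA_PROP_DEVICE_ICON_NAME if it is not set yet. The base
 * name is derived from the form factor (or the device class, or a
 * generic fallback), optionally suffixed with the profile family and
 * the bus. For example, a PCI sink with an analog profile ends up as
 * "audio-card-analog-pci". */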
2194 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2195 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2196
2197 pa_assert(p);
2198
2199 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2200 return TRUE;
2201
2202 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2203
2204 if (pa_streq(ff, "microphone"))
2205 t = "audio-input-microphone";
2206 else if (pa_streq(ff, "webcam"))
2207 t = "camera-web";
2208 else if (pa_streq(ff, "computer"))
2209 t = "computer";
2210 else if (pa_streq(ff, "handset"))
2211 t = "phone";
2212 else if (pa_streq(ff, "portable"))
2213 t = "multimedia-player";
2214 else if (pa_streq(ff, "tv"))
2215 t = "video-display";
2216
2217 /*
2218 * The following icons are not (yet) part of the icon naming spec;
2219 * see the discussion at:
2220 *
2221 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2222 */
2223 else if (pa_streq(ff, "headset"))
2224 t = "audio-headset";
2225 else if (pa_streq(ff, "headphone"))
2226 t = "audio-headphones";
2227 else if (pa_streq(ff, "speaker"))
2228 t = "audio-speakers";
2229 else if (pa_streq(ff, "hands-free"))
2230 t = "audio-handsfree";
2231 }
2232
2233 if (!t)
2234 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2235 if (pa_streq(c, "modem"))
2236 t = "modem";
2237
2238 if (!t) {
2239 if (is_sink)
2240 t = "audio-card";
2241 else
2242 t = "audio-input-microphone";
2243 }
2244
2245 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2246 if (strstr(profile, "analog"))
2247 s = "-analog";
2248 else if (strstr(profile, "iec958"))
2249 s = "-iec958";
2250 else if (strstr(profile, "hdmi"))
2251 s = "-hdmi";
2252 }
2253
2254 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2255
2256 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2257
2258 return TRUE;
2259 }
2260
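/* Fills in PA_PROP_DEVICE_DESCRIPTION if it is not set yet, based on
 * the form factor ("internal"), the device class ("modem") or the
 * product name, in that order. Returns FALSE if no description could
 * be derived. */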
2261 pa_bool_t pa_device_init_description(pa_proplist *p) {
2262 const char *s;
2263 pa_assert(p);
2264
2265 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2266 return TRUE;
2267
2268 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2269 if (pa_streq(s, "internal")) {
2270 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Internal Audio"));
2271 return TRUE;
2272 }
2273
2274 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2275 if (pa_streq(s, "modem")) {
2276 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Modem"));
2277 return TRUE;
2278 }
2279
2280 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME))) {
2281 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, s);
2282 return TRUE;
2283 }
2284
2285 return FALSE;
2286 }