1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
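/* The latency limits below are in microseconds (pa_usec_t): a 0.5 ms
 * absolute minimum, a 10 s absolute maximum, and a 250 ms default for
 * sinks that do not support dynamic latency. MIX_BUFFER_LENGTH is the
 * default render size used when a caller passes no explicit length. */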
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
54
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 memset(data, 0, sizeof(*data));
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_done(pa_sink_new_data *data) {
104 pa_assert(data);
105
106 pa_xfree(data->name);
107 pa_proplist_free(data->proplist);
108 }
109
110 /* Called from main context */
111 static void reset_callbacks(pa_sink *s) {
112 pa_assert(s);
113
114 s->set_state = NULL;
115 s->get_volume = NULL;
116 s->set_volume = NULL;
117 s->get_mute = NULL;
118 s->set_mute = NULL;
119 s->request_rewind = NULL;
120 s->update_requested_latency = NULL;
121 }
122
123 /* Called from main context */
124 pa_sink* pa_sink_new(
125 pa_core *core,
126 pa_sink_new_data *data,
127 pa_sink_flags_t flags) {
128
129 pa_sink *s;
130 const char *name;
131 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
132 pa_source_new_data source_data;
133 const char *dn;
134 char *pt;
135
136 pa_assert(core);
137 pa_assert(data);
138 pa_assert(data->name);
139
140 s = pa_msgobject_new(pa_sink);
141
142 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
143 pa_xfree(s);
144 return NULL;
145 }
146
147 pa_sink_new_data_set_name(data, name);
148
149 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
150 pa_xfree(s);
151 pa_namereg_unregister(core, name);
152 return NULL;
153 }
154
155 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
156 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
157
158 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
159
160 if (!data->channel_map_is_set)
161 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
162
163 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
164 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
165
166 if (!data->volume_is_set)
167 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
168
169 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
170 pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);
171
172 if (!data->muted_is_set)
173 data->muted = FALSE;
174
175 if (data->card)
176 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
177
178 pa_device_init_description(data->proplist);
179 pa_device_init_icon(data->proplist, TRUE);
180
181 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
182 pa_xfree(s);
183 pa_namereg_unregister(core, name);
184 return NULL;
185 }
186
187 s->parent.parent.free = sink_free;
188 s->parent.process_msg = pa_sink_process_msg;
189
190 s->core = core;
191 s->state = PA_SINK_INIT;
192 s->flags = flags;
193 s->suspend_cause = 0;
194 s->name = pa_xstrdup(name);
195 s->proplist = pa_proplist_copy(data->proplist);
196 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
197 s->module = data->module;
198 s->card = data->card;
199
200 s->sample_spec = data->sample_spec;
201 s->channel_map = data->channel_map;
202
203 s->inputs = pa_idxset_new(NULL, NULL);
204 s->n_corked = 0;
205
206 s->reference_volume = s->virtual_volume = data->volume;
207 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
208 s->base_volume = PA_VOLUME_NORM;
209 s->n_volume_steps = PA_VOLUME_NORM+1;
210 s->muted = data->muted;
211 s->refresh_volume = s->refresh_muted = FALSE;
212
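/* A fixed_latency of 0 marks the sink as having dynamic latency; all
 * other sinks start out with the 250 ms default. */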
213 s->fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
214
215 reset_callbacks(s);
216 s->userdata = NULL;
217
218 s->asyncmsgq = NULL;
219 s->rtpoll = NULL;
220
221 pa_silence_memchunk_get(
222 &core->silence_cache,
223 core->mempool,
224 &s->silence,
225 &s->sample_spec,
226 0);
227
228 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
229 s->thread_info.soft_volume = s->soft_volume;
230 s->thread_info.soft_muted = s->muted;
231 s->thread_info.state = s->state;
232 s->thread_info.rewind_nbytes = 0;
233 s->thread_info.rewind_requested = FALSE;
234 s->thread_info.max_rewind = 0;
235 s->thread_info.max_request = 0;
236 s->thread_info.requested_latency_valid = FALSE;
237 s->thread_info.requested_latency = 0;
238 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
239 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
240
241 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
242
243 if (s->card)
244 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
245
246 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
247 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
248 s->index,
249 s->name,
250 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
251 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
252 pt);
253 pa_xfree(pt);
254
255 pa_source_new_data_init(&source_data);
256 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
257 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
258 source_data.name = pa_sprintf_malloc("%s.monitor", name);
259 source_data.driver = data->driver;
260 source_data.module = data->module;
261 source_data.card = data->card;
262
263 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
264 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
265 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
266
267 s->monitor_source = pa_source_new(core, &source_data,
268 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
269 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
270
271 pa_source_new_data_done(&source_data);
272
273 if (!s->monitor_source) {
274 pa_sink_unlink(s);
275 pa_sink_unref(s);
276 return NULL;
277 }
278
279 s->monitor_source->monitor_of = s;
280
281 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
282 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
283
284 return s;
285 }
286
287 /* Called from main context */
288 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
289 int ret;
290 pa_bool_t suspend_change;
291 pa_sink_state_t original_state;
292
293 pa_assert(s);
294
295 if (s->state == state)
296 return 0;
297
298 original_state = s->state;
299
300 suspend_change =
301 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
302 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
303
304 if (s->set_state)
305 if ((ret = s->set_state(s, state)) < 0)
306 return ret;
307
308 if (s->asyncmsgq)
309 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
310
311 if (s->set_state)
312 s->set_state(s, original_state);
313
314 return ret;
315 }
316
317 s->state = state;
318
319 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
320 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
321 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
322 }
323
324 if (suspend_change) {
325 pa_sink_input *i;
326 uint32_t idx;
327
328 /* We're suspending or resuming, tell everyone about it */
329
330 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx)))
331 if (s->state == PA_SINK_SUSPENDED &&
332 (i->flags & PA_SINK_INPUT_FAIL_ON_SUSPEND))
333 pa_sink_input_kill(i);
334 else if (i->suspend)
335 i->suspend(i, state == PA_SINK_SUSPENDED);
336
337 if (s->monitor_source)
338 pa_source_sync_suspend(s->monitor_source);
339 }
340
341 return 0;
342 }
343
344 /* Called from main context */
345 void pa_sink_put(pa_sink* s) {
346 pa_sink_assert_ref(s);
347
348 pa_assert(s->state == PA_SINK_INIT);
349
350 /* The following fields must be initialized properly when calling _put() */
351 pa_assert(s->asyncmsgq);
352 pa_assert(s->rtpoll);
353 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
354
355 /* Generally, flags should be initialized via pa_sink_new(). As a
356 * special exception we allow volume related flags to be set
357 * between _new() and _put(). */
358
359 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
360 s->flags |= PA_SINK_DECIBEL_VOLUME;
361
362 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
363 s->flags |= PA_SINK_FLAT_VOLUME;
364
365 s->thread_info.soft_volume = s->soft_volume;
366 s->thread_info.soft_muted = s->muted;
367
368 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
369 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
370 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->fixed_latency != 0));
371 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
372 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
373
374 pa_assert(s->monitor_source->fixed_latency == s->fixed_latency);
375 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
376 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
377
378 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
379
380 pa_source_put(s->monitor_source);
381
382 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
383 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
384 }
385
386 /* Called from main context */
387 void pa_sink_unlink(pa_sink* s) {
388 pa_bool_t linked;
389 pa_sink_input *i, *j = NULL;
390
391 pa_assert(s);
392
393 /* Please note that pa_sink_unlink() does more than simply
394 * reversing pa_sink_put(). It also undoes the registrations
395 * already done in pa_sink_new()! */
396
397 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
398 * may be called multiple times on the same sink without bad
399 * effects. */
400
401 linked = PA_SINK_IS_LINKED(s->state);
402
403 if (linked)
404 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
405
406 if (s->state != PA_SINK_UNLINKED)
407 pa_namereg_unregister(s->core, s->name);
408 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
409
410 if (s->card)
411 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
412
413 while ((i = pa_idxset_first(s->inputs, NULL))) {
414 pa_assert(i != j);
415 pa_sink_input_kill(i);
416 j = i;
417 }
418
419 if (linked)
420 sink_set_state(s, PA_SINK_UNLINKED);
421 else
422 s->state = PA_SINK_UNLINKED;
423
424 reset_callbacks(s);
425
426 if (s->monitor_source)
427 pa_source_unlink(s->monitor_source);
428
429 if (linked) {
430 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
431 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
432 }
433 }
434
435 /* Called from main context */
436 static void sink_free(pa_object *o) {
437 pa_sink *s = PA_SINK(o);
438 pa_sink_input *i;
439
440 pa_assert(s);
441 pa_assert(pa_sink_refcnt(s) == 0);
442
443 if (PA_SINK_IS_LINKED(s->state))
444 pa_sink_unlink(s);
445
446 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
447
448 if (s->monitor_source) {
449 pa_source_unref(s->monitor_source);
450 s->monitor_source = NULL;
451 }
452
453 pa_idxset_free(s->inputs, NULL, NULL);
454
455 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
456 pa_sink_input_unref(i);
457
458 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
459
460 if (s->silence.memblock)
461 pa_memblock_unref(s->silence.memblock);
462
463 pa_xfree(s->name);
464 pa_xfree(s->driver);
465
466 if (s->proplist)
467 pa_proplist_free(s->proplist);
468
469 pa_xfree(s);
470 }
471
472 /* Called from main context */
473 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
474 pa_sink_assert_ref(s);
475
476 s->asyncmsgq = q;
477
478 if (s->monitor_source)
479 pa_source_set_asyncmsgq(s->monitor_source, q);
480 }
481
482 /* Called from main context */
483 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
484 pa_sink_assert_ref(s);
485
486 s->rtpoll = p;
487 if (s->monitor_source)
488 pa_source_set_rtpoll(s->monitor_source, p);
489 }
490
491 /* Called from main context */
492 int pa_sink_update_status(pa_sink*s) {
493 pa_sink_assert_ref(s);
494 pa_assert(PA_SINK_IS_LINKED(s->state));
495
496 if (s->state == PA_SINK_SUSPENDED)
497 return 0;
498
499 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
500 }
501
502 /* Called from main context */
503 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
504 pa_sink_assert_ref(s);
505 pa_assert(PA_SINK_IS_LINKED(s->state));
506 pa_assert(cause != 0);
507
508 if (suspend) {
509 s->suspend_cause |= cause;
510 s->monitor_source->suspend_cause |= cause;
511 } else {
512 s->suspend_cause &= ~cause;
513 s->monitor_source->suspend_cause &= ~cause;
514 }
515
516 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
517 return 0;
518
519 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
520
521 if (s->suspend_cause)
522 return sink_set_state(s, PA_SINK_SUSPENDED);
523 else
524 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
525 }
526
527 /* Called from main context */
528 pa_queue *pa_sink_move_all_start(pa_sink *s) {
529 pa_queue *q;
530 pa_sink_input *i, *n;
531 uint32_t idx;
532
533 pa_sink_assert_ref(s);
534 pa_assert(PA_SINK_IS_LINKED(s->state));
535
536 q = pa_queue_new();
537
538 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
539 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
540
541 pa_sink_input_ref(i);
542
543 if (pa_sink_input_start_move(i) >= 0)
544 pa_queue_push(q, i);
545 else
546 pa_sink_input_unref(i);
547 }
548
549 return q;
550 }
551
552 /* Called from main context */
553 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
554 pa_sink_input *i;
555
556 pa_sink_assert_ref(s);
557 pa_assert(PA_SINK_IS_LINKED(s->state));
558 pa_assert(q);
559
560 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
561 if (pa_sink_input_finish_move(i, s, save) < 0)
562 pa_sink_input_kill(i);
563
564 pa_sink_input_unref(i);
565 }
566
567 pa_queue_free(q, NULL, NULL);
568 }
569
570 /* Called from main context */
571 void pa_sink_move_all_fail(pa_queue *q) {
572 pa_sink_input *i;
573 pa_assert(q);
574
575 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
576 if (pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_MOVE_FAIL], i) == PA_HOOK_OK) {
577 pa_sink_input_kill(i);
578 pa_sink_input_unref(i);
579 }
580 }
581
582 pa_queue_free(q, NULL, NULL);
583 }
584
585 /* Called from IO thread context */
586 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
587 pa_sink_input *i;
588 void *state = NULL;
589 pa_sink_assert_ref(s);
590 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
591
592 /* If nobody requested this and this isn't actually a real
593 * rewind, then we can short-cut this */
594 if (!s->thread_info.rewind_requested && nbytes <= 0)
595 return;
596
597 s->thread_info.rewind_nbytes = 0;
598 s->thread_info.rewind_requested = FALSE;
599
600 if (s->thread_info.state == PA_SINK_SUSPENDED)
601 return;
602
603 if (nbytes > 0)
604 pa_log_debug("Processing rewind...");
605
606 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
607 pa_sink_input_assert_ref(i);
608 pa_sink_input_process_rewind(i, nbytes);
609 }
610
611 if (nbytes > 0)
612 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
613 pa_source_process_rewind(s->monitor_source, nbytes);
614 }
615
616 /* Called from IO thread context */
617 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
618 pa_sink_input *i;
619 unsigned n = 0;
620 void *state = NULL;
621 size_t mixlength = *length;
622
623 pa_sink_assert_ref(s);
624 pa_assert(info);
625
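/* Peek one chunk from every connected input, up to maxinfo entries.
 * *length is clamped to the shortest chunk seen, so that all inputs
 * can be mixed over the same span; chunks that are pure silence are
 * dropped from the mix set right away. */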
626 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
627 pa_sink_input_assert_ref(i);
628
629 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
630
631 if (mixlength == 0 || info->chunk.length < mixlength)
632 mixlength = info->chunk.length;
633
634 if (pa_memblock_is_silence(info->chunk.memblock)) {
635 pa_memblock_unref(info->chunk.memblock);
636 continue;
637 }
638
639 info->userdata = pa_sink_input_ref(i);
640
641 pa_assert(info->chunk.memblock);
642 pa_assert(info->chunk.length > 0);
643
644 info++;
645 n++;
646 maxinfo--;
647 }
648
649 if (mixlength > 0)
650 *length = mixlength;
651
652 return n;
653 }
654
655 /* Called from IO thread context */
656 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
657 pa_sink_input *i;
658 void *state = NULL;
659 unsigned p = 0;
660 unsigned n_unreffed = 0;
661
662 pa_sink_assert_ref(s);
663 pa_assert(result);
664 pa_assert(result->memblock);
665 pa_assert(result->length > 0);
666
667 /* We optimize for the case where the order of the inputs has not changed */
668
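/* The index p is kept across iterations: the search below resumes at
 * the position of the previous match and wraps around, so when the
 * input order has not changed each entry is found after at most a
 * couple of probes instead of a full scan. */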
669 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
670 unsigned j;
671 pa_mix_info* m = NULL;
672
673 pa_sink_input_assert_ref(i);
674
675 /* Let's try to find the matching entry in the pa_mix_info array */
676 for (j = 0; j < n; j ++) {
677
678 if (info[p].userdata == i) {
679 m = info + p;
680 break;
681 }
682
683 p++;
684 if (p >= n)
685 p = 0;
686 }
687
688 /* Drop read data */
689 pa_sink_input_drop(i, result->length);
690
691 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
692
693 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
694 void *ostate = NULL;
695 pa_source_output *o;
696 pa_memchunk c;
697
698 if (m && m->chunk.memblock) {
699 c = m->chunk;
700 pa_memblock_ref(c.memblock);
701 pa_assert(result->length <= c.length);
702 c.length = result->length;
703
704 pa_memchunk_make_writable(&c, 0);
705 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
706 } else {
707 c = s->silence;
708 pa_memblock_ref(c.memblock);
709 pa_assert(result->length <= c.length);
710 c.length = result->length;
711 }
712
713 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
714 pa_source_output_assert_ref(o);
715 pa_assert(o->direct_on_input == i);
716 pa_source_post_direct(s->monitor_source, o, &c);
717 }
718
719 pa_memblock_unref(c.memblock);
720 }
721 }
722
723 if (m) {
724 if (m->chunk.memblock)
725 pa_memblock_unref(m->chunk.memblock);
726 pa_memchunk_reset(&m->chunk);
727
728 pa_sink_input_unref(m->userdata);
729 m->userdata = NULL;
730
731 n_unreffed += 1;
732 }
733 }
734
735 /* Now drop references to entries that are included in the
736 * pa_mix_info array but don't exist anymore */
737
738 if (n_unreffed < n) {
739 for (; n > 0; info++, n--) {
740 if (info->userdata)
741 pa_sink_input_unref(info->userdata);
742 if (info->chunk.memblock)
743 pa_memblock_unref(info->chunk.memblock);
744 }
745 }
746
747 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
748 pa_source_post(s->monitor_source, result);
749 }
750
751 /* Called from IO thread context */
752 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
753 pa_mix_info info[MAX_MIX_CHANNELS];
754 unsigned n;
755 size_t block_size_max;
756
757 pa_sink_assert_ref(s);
758 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
759 pa_assert(pa_frame_aligned(length, &s->sample_spec));
760 pa_assert(result);
761
762 pa_sink_ref(s);
763
764 pa_assert(!s->thread_info.rewind_requested);
765 pa_assert(s->thread_info.rewind_nbytes == 0);
766
767 if (s->thread_info.state == PA_SINK_SUSPENDED) {
768 result->memblock = pa_memblock_ref(s->silence.memblock);
769 result->index = s->silence.index;
770 result->length = PA_MIN(s->silence.length, length);
pa_sink_unref(s); /* drop the reference taken above before returning early */
771 return;
772 }
773
774 if (length <= 0)
775 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
776
777 block_size_max = pa_mempool_block_size_max(s->core->mempool);
778 if (length > block_size_max)
779 length = pa_frame_align(block_size_max, &s->sample_spec);
780
781 pa_assert(length > 0);
782
783 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
784
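/* Three cases follow: with no inputs we hand out the cached silence
 * block, with exactly one input we reference its chunk directly and
 * apply volume/mute in place only when necessary, and with several
 * inputs we mix into a freshly allocated memblock. */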
785 if (n == 0) {
786
787 *result = s->silence;
788 pa_memblock_ref(result->memblock);
789
790 if (result->length > length)
791 result->length = length;
792
793 } else if (n == 1) {
794 pa_cvolume volume;
795
796 *result = info[0].chunk;
797 pa_memblock_ref(result->memblock);
798
799 if (result->length > length)
800 result->length = length;
801
802 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
803
804 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
805 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
806 pa_memblock_unref(result->memblock);
807 pa_silence_memchunk_get(&s->core->silence_cache,
808 s->core->mempool,
809 result,
810 &s->sample_spec,
811 result->length);
812 } else {
813 pa_memchunk_make_writable(result, 0);
814 pa_volume_memchunk(result, &s->sample_spec, &volume);
815 }
816 }
817 } else {
818 void *ptr;
819 result->memblock = pa_memblock_new(s->core->mempool, length);
820
821 ptr = pa_memblock_acquire(result->memblock);
822 result->length = pa_mix(info, n,
823 ptr, length,
824 &s->sample_spec,
825 &s->thread_info.soft_volume,
826 s->thread_info.soft_muted);
827 pa_memblock_release(result->memblock);
828
829 result->index = 0;
830 }
831
832 inputs_drop(s, info, n, result);
833
834 pa_sink_unref(s);
835 }
836
837 /* Called from IO thread context */
838 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
839 pa_mix_info info[MAX_MIX_CHANNELS];
840 unsigned n;
841 size_t length, block_size_max;
842
843 pa_sink_assert_ref(s);
844 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
845 pa_assert(target);
846 pa_assert(target->memblock);
847 pa_assert(target->length > 0);
848 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
849
850 pa_sink_ref(s);
851
852 pa_assert(!s->thread_info.rewind_requested);
853 pa_assert(s->thread_info.rewind_nbytes == 0);
854
855 if (s->thread_info.state == PA_SINK_SUSPENDED) {
856 pa_silence_memchunk(target, &s->sample_spec);
pa_sink_unref(s); /* drop the reference taken above before returning early */
857 return;
858 }
859
860 length = target->length;
861 block_size_max = pa_mempool_block_size_max(s->core->mempool);
862 if (length > block_size_max)
863 length = pa_frame_align(block_size_max, &s->sample_spec);
864
865 pa_assert(length > 0);
866
867 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
868
869 if (n == 0) {
870 if (target->length > length)
871 target->length = length;
872
873 pa_silence_memchunk(target, &s->sample_spec);
874 } else if (n == 1) {
875 pa_cvolume volume;
876
877 if (target->length > length)
878 target->length = length;
879
880 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
881
882 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
883 pa_silence_memchunk(target, &s->sample_spec);
884 else {
885 pa_memchunk vchunk;
886
887 vchunk = info[0].chunk;
888 pa_memblock_ref(vchunk.memblock);
889
890 if (vchunk.length > length)
891 vchunk.length = length;
892
893 if (!pa_cvolume_is_norm(&volume)) {
894 pa_memchunk_make_writable(&vchunk, 0);
895 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
896 }
897
898 pa_memchunk_memcpy(target, &vchunk);
899 pa_memblock_unref(vchunk.memblock);
900 }
901
902 } else {
903 void *ptr;
904
905 ptr = pa_memblock_acquire(target->memblock);
906
907 target->length = pa_mix(info, n,
908 (uint8_t*) ptr + target->index, length,
909 &s->sample_spec,
910 &s->thread_info.soft_volume,
911 s->thread_info.soft_muted);
912
913 pa_memblock_release(target->memblock);
914 }
915
916 inputs_drop(s, info, n, target);
917
918 pa_sink_unref(s);
919 }
920
921 /* Called from IO thread context */
922 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
923 pa_memchunk chunk;
924 size_t l, d;
925
926 pa_sink_assert_ref(s);
927 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
928 pa_assert(target);
929 pa_assert(target->memblock);
930 pa_assert(target->length > 0);
931 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
932
933 pa_sink_ref(s);
934
935 pa_assert(!s->thread_info.rewind_requested);
936 pa_assert(s->thread_info.rewind_nbytes == 0);
937
938 l = target->length;
939 d = 0;
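/* pa_sink_render_into() may fill less than requested, since it clamps
 * to the mempool block size and to the shortest input chunk. Hence we
 * keep rendering into the remaining tail of the target until the whole
 * chunk has been written. */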
940 while (l > 0) {
941 chunk = *target;
942 chunk.index += d;
943 chunk.length -= d;
944
945 pa_sink_render_into(s, &chunk);
946
947 d += chunk.length;
948 l -= chunk.length;
949 }
950
951 pa_sink_unref(s);
952 }
953
954 /* Called from IO thread context */
955 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
956 pa_mix_info info[MAX_MIX_CHANNELS];
957 size_t length1st = length;
958 unsigned n;
959
960 pa_sink_assert_ref(s);
961 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
962 pa_assert(length > 0);
963 pa_assert(pa_frame_aligned(length, &s->sample_spec));
964 pa_assert(result);
965
966 pa_sink_ref(s);
967
968 pa_assert(!s->thread_info.rewind_requested);
969 pa_assert(s->thread_info.rewind_nbytes == 0);
970
971 pa_assert(length > 0);
972
973 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
974
975 if (n == 0) {
976 pa_silence_memchunk_get(&s->core->silence_cache,
977 s->core->mempool,
978 result,
979 &s->sample_spec,
980 length1st);
981 } else if (n == 1) {
982 pa_cvolume volume;
983
984 *result = info[0].chunk;
985 pa_memblock_ref(result->memblock);
986
987 if (result->length > length)
988 result->length = length;
989
990 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
991
992 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
993 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
994 pa_memblock_unref(result->memblock);
995 pa_silence_memchunk_get(&s->core->silence_cache,
996 s->core->mempool,
997 result,
998 &s->sample_spec,
999 result->length);
1000 } else {
1001 pa_memchunk_make_writable(result, length);
1002 pa_volume_memchunk(result, &s->sample_spec, &volume);
1003 }
1004 }
1005 } else {
1006 void *ptr;
1007
1008 result->index = 0;
1009 result->memblock = pa_memblock_new(s->core->mempool, length);
1010
1011 ptr = pa_memblock_acquire(result->memblock);
1012
1013 result->length = pa_mix(info, n,
1014 (uint8_t*) ptr + result->index, length1st,
1015 &s->sample_spec,
1016 &s->thread_info.soft_volume,
1017 s->thread_info.soft_muted);
1018
1019 pa_memblock_release(result->memblock);
1020 }
1021
1022 inputs_drop(s, info, n, result);
1023
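/* The first pass above was limited to length1st, which fill_mix_info()
 * may have clamped to the shortest input chunk. If it produced less
 * than requested, make the block writable at full size and render the
 * remainder directly into its tail. */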
1024 if (result->length < length) {
1025 pa_memchunk chunk;
1026 size_t l, d;
1027 pa_memchunk_make_writable(result, length);
1028
1029 l = length - result->length;
1030 d = result->index + result->length;
1031 while (l > 0) {
1032 chunk = *result;
1033 chunk.index = d;
1034 chunk.length = l;
1035
1036 pa_sink_render_into(s, &chunk);
1037
1038 d += chunk.length;
1039 l -= chunk.length;
1040 }
1041 result->length = length;
1042 }
1043
1044 pa_sink_unref(s);
1045 }
1046
1047 /* Called from main thread */
1048 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1049 pa_usec_t usec = 0;
1050
1051 pa_sink_assert_ref(s);
1052 pa_assert(PA_SINK_IS_LINKED(s->state));
1053
1054 /* The returned value is supposed to be in the time domain of the sound card! */
1055
1056 if (s->state == PA_SINK_SUSPENDED)
1057 return 0;
1058
1059 if (!(s->flags & PA_SINK_LATENCY))
1060 return 0;
1061
1062 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1063
1064 return usec;
1065 }
1066
1067 /* Called from IO thread */
1068 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1069 pa_usec_t usec = 0;
1070 pa_msgobject *o;
1071
1072 pa_sink_assert_ref(s);
1073 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1074
1075 /* The returned value is supposed to be in the time domain of the sound card! */
1076
1077 if (s->thread_info.state == PA_SINK_SUSPENDED)
1078 return 0;
1079
1080 if (!(s->flags & PA_SINK_LATENCY))
1081 return 0;
1082
1083 o = PA_MSGOBJECT(s);
1084
1085 /* We probably should make this a proper vtable callback instead of going through process_msg() */
1086
1087 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1088 return -1;
1089
1090 return usec;
1091 }
1092
1093 static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
1094 unsigned c;
1095
1096 pa_sink_input_assert_ref(i);
1097 pa_assert(new_volume->channels == i->sample_spec.channels);
1098
1099 /*
1100 * This basically calculates:
1101 *
1102 * i->relative_volume := i->virtual_volume / new_volume
1103 * i->soft_volume := i->relative_volume * i->volume_factor
1104 */
1105
1106 /* The new sink volume passed in here must already be remapped to
1107 * the sink input's channel map! */
1108
1109 i->soft_volume.channels = i->sample_spec.channels;
1110
1111 for (c = 0; c < i->sample_spec.channels; c++)
1112
1113 if (new_volume->values[c] <= PA_VOLUME_MUTED)
1114 /* We leave i->relative_volume untouched */
1115 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1116 else {
1117 i->relative_volume[c] =
1118 pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
1119 pa_sw_volume_to_linear(new_volume->values[c]);
1120
1121 i->soft_volume.values[c] = pa_sw_volume_from_linear(
1122 i->relative_volume[c] *
1123 pa_sw_volume_to_linear(i->volume_factor.values[c]));
1124 }
1125
1126 /* Hooks have the ability to play games with i->soft_volume */
1127 pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);
1128
1129 /* We don't copy the soft_volume to the thread_info data
1130 * here. That must be done by the caller */
1131 }
1132
1133 /* Called from main thread */
1134 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1135 pa_sink_input *i;
1136 uint32_t idx;
1137
1138 pa_sink_assert_ref(s);
1139 pa_assert(new_volume);
1140 pa_assert(PA_SINK_IS_LINKED(s->state));
1141 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1142
1143 /* This is called whenever a sink input volume changes or a sink
1144 * input is added/removed and we might need to fix up the sink
1145 * volume accordingly. Please note that we don't actually update
1146 * the sink's volume here, we only return how it needs to be
1147 * updated. The caller should then call pa_sink_set_volume().*/
1148
1149 if (pa_idxset_isempty(s->inputs)) {
1150 /* In the special case that we have no sink input we leave the
1151 * volume unmodified. */
1152 *new_volume = s->reference_volume;
1153 return;
1154 }
1155
1156 pa_cvolume_mute(new_volume, s->channel_map.channels);
1157
1158 /* First let's determine the new maximum volume of all inputs
1159 * connected to this sink */
1160 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1161 unsigned c;
1162 pa_cvolume remapped_volume;
1163
1164 remapped_volume = i->virtual_volume;
1165 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1166
1167 for (c = 0; c < new_volume->channels; c++)
1168 if (remapped_volume.values[c] > new_volume->values[c])
1169 new_volume->values[c] = remapped_volume.values[c];
1170 }
1171
1172 /* Then, let's update the soft volumes of all inputs connected
1173 * to this sink */
1174 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1175 pa_cvolume remapped_new_volume;
1176
1177 remapped_new_volume = *new_volume;
1178 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1179 compute_new_soft_volume(i, &remapped_new_volume);
1180
1181 /* We don't copy soft_volume to the thread_info data here
1182 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1183 * want the update to be atomic with the sink volume
1184 * update, hence we do it within the pa_sink_set_volume() call
1185 * below */
1186 }
1187 }
1188
1189 /* Called from main thread */
1190 void pa_sink_propagate_flat_volume(pa_sink *s) {
1191 pa_sink_input *i;
1192 uint32_t idx;
1193
1194 pa_sink_assert_ref(s);
1195 pa_assert(PA_SINK_IS_LINKED(s->state));
1196 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1197
1198 /* This is called whenever the sink volume changes for a reason
1199 * that is not a sink input volume change. We need to fix up the
1200 * sink input volumes accordingly */
1201
1202 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1203 pa_cvolume sink_volume, new_virtual_volume;
1204 unsigned c;
1205
1206 /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */
1207
1208 sink_volume = s->virtual_volume;
1209 pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);
1210
1211 for (c = 0; c < i->sample_spec.channels; c++)
1212 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1213 i->relative_volume[c] *
1214 pa_sw_volume_to_linear(sink_volume.values[c]));
1215
1216 new_virtual_volume.channels = i->sample_spec.channels;
1217
1218 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1219 i->virtual_volume = new_virtual_volume;
1220
1221 /* Hmm, the soft volume might no longer actually match
1222 * what has been chosen as new virtual volume here,
1223 * especially when the old volume was
1224 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1225 * volumes here. */
1226 compute_new_soft_volume(i, &sink_volume);
1227
1228 /* The virtual volume changed, let's tell people so */
1229 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1230 }
1231 }
1232
1233 /* If the soft_volume of any of the sink inputs got changed, let's
1234 * make sure the thread copies are synced up. */
1235 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
1236 }
1237
1238 /* Called from main thread */
1239 void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference) {
1240 pa_bool_t virtual_volume_changed;
1241
1242 pa_sink_assert_ref(s);
1243 pa_assert(PA_SINK_IS_LINKED(s->state));
1244 pa_assert(volume);
1245 pa_assert(pa_cvolume_valid(volume));
1246 pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));
1247
1248 virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
1249 s->virtual_volume = *volume;
1250
1251 if (become_reference)
1252 s->reference_volume = s->virtual_volume;
1253
1254 /* Propagate this volume change back to the inputs */
1255 if (virtual_volume_changed)
1256 if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
1257 pa_sink_propagate_flat_volume(s);
1258
1259 if (s->set_volume) {
1260 /* If we have a function set_volume(), then we do not apply a
1261 * soft volume by default. However, set_volume() is free to
1262 * apply one to s->soft_volume */
1263
1264 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1265 s->set_volume(s);
1266
1267 } else
1268 /* If we have no function set_volume(), then the soft volume
1269 * becomes the virtual volume */
1270 s->soft_volume = s->virtual_volume;
1271
1272 /* This tells the sink that soft and/or virtual volume changed */
1273 if (sendmsg)
1274 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1275
1276 if (virtual_volume_changed)
1277 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1278 }
1279
1280 /* Called from main thread. Only to be called by sink implementor */
1281 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1282 pa_sink_assert_ref(s);
1283 pa_assert(volume);
1284
1285 s->soft_volume = *volume;
1286
1287 if (PA_SINK_IS_LINKED(s->state))
1288 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1289 else
1290 s->thread_info.soft_volume = *volume;
1291 }
1292
1293 /* Called from main thread */
1294 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
1295 pa_sink_assert_ref(s);
1296
1297 if (s->refresh_volume || force_refresh) {
1298 struct pa_cvolume old_virtual_volume = s->virtual_volume;
1299
1300 if (s->get_volume)
1301 s->get_volume(s);
1302
1303 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1304
1305 if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {
1306
1307 s->reference_volume = s->virtual_volume;
1308
1309 if (s->flags & PA_SINK_FLAT_VOLUME)
1310 pa_sink_propagate_flat_volume(s);
1311
1312 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1313 }
1314 }
1315
1316 return reference ? &s->reference_volume : &s->virtual_volume;
1317 }
1318
1319 /* Called from main thread */
1320 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume) {
1321 pa_sink_assert_ref(s);
1322
1323 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1324
1325 if (pa_cvolume_equal(&s->virtual_volume, new_volume))
1326 return;
1327
1328 s->reference_volume = s->virtual_volume = *new_volume;
1329
1330 if (s->flags & PA_SINK_FLAT_VOLUME)
1331 pa_sink_propagate_flat_volume(s);
1332
1333 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1334 }
1335
1336 /* Called from main thread */
1337 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute) {
1338 pa_bool_t old_muted;
1339
1340 pa_sink_assert_ref(s);
1341 pa_assert(PA_SINK_IS_LINKED(s->state));
1342
1343 old_muted = s->muted;
1344 s->muted = mute;
1345
1346 if (s->set_mute)
1347 s->set_mute(s);
1348
1349 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1350
1351 if (old_muted != s->muted)
1352 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1353 }
1354
1355 /* Called from main thread */
1356 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1357
1358 pa_sink_assert_ref(s);
1359
1360 if (s->refresh_muted || force_refresh) {
1361 pa_bool_t old_muted = s->muted;
1362
1363 if (s->get_mute)
1364 s->get_mute(s);
1365
1366 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1367
1368 if (old_muted != s->muted)
1369 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1370 }
1371
1372 return s->muted;
1373 }
1374
1375 /* Called from main thread */
1376 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1377 pa_sink_assert_ref(s);
1378
1379 /* The sink implementor may call this if the mute state changed to make sure everyone is notified */
1380
1381 if (s->muted == new_muted)
1382 return;
1383
1384 s->muted = new_muted;
1385 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1386 }
1387
1388 /* Called from main thread */
1389 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1390 pa_sink_assert_ref(s);
1391
1392 if (p)
1393 pa_proplist_update(s->proplist, mode, p);
1394
1395 if (PA_SINK_IS_LINKED(s->state)) {
1396 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1397 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1398 }
1399
1400 return TRUE;
1401 }
1402
1403 /* Called from main thread */
1404 void pa_sink_set_description(pa_sink *s, const char *description) {
1405 const char *old;
1406 pa_sink_assert_ref(s);
1407
1408 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1409 return;
1410
1411 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1412
1413 if (old && description && !strcmp(old, description))
1414 return;
1415
1416 if (description)
1417 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1418 else
1419 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1420
1421 if (s->monitor_source) {
1422 char *n;
1423
1424 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1425 pa_source_set_description(s->monitor_source, n);
1426 pa_xfree(n);
1427 }
1428
1429 if (PA_SINK_IS_LINKED(s->state)) {
1430 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1431 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1432 }
1433 }
1434
1435 /* Called from main thread */
1436 unsigned pa_sink_linked_by(pa_sink *s) {
1437 unsigned ret;
1438
1439 pa_sink_assert_ref(s);
1440 pa_assert(PA_SINK_IS_LINKED(s->state));
1441
1442 ret = pa_idxset_size(s->inputs);
1443
1444 /* We add in the number of streams connected to us here. Please
1445 * note the asymmetry to pa_sink_used_by()! */
1446
1447 if (s->monitor_source)
1448 ret += pa_source_linked_by(s->monitor_source);
1449
1450 return ret;
1451 }
1452
1453 /* Called from main thread */
1454 unsigned pa_sink_used_by(pa_sink *s) {
1455 unsigned ret;
1456
1457 pa_sink_assert_ref(s);
1458 pa_assert(PA_SINK_IS_LINKED(s->state));
1459
1460 ret = pa_idxset_size(s->inputs);
1461 pa_assert(ret >= s->n_corked);
1462
1463 /* Streams connected to our monitor source do not matter for
1464 * pa_sink_used_by()! */
1465
1466 return ret - s->n_corked;
1467 }
1468
1469 /* Called from main thread */
1470 unsigned pa_sink_check_suspend(pa_sink *s) {
1471 unsigned ret;
1472 pa_sink_input *i;
1473 uint32_t idx;
1474
1475 pa_sink_assert_ref(s);
1476
1477 if (!PA_SINK_IS_LINKED(s->state))
1478 return 0;
1479
1480 ret = 0;
1481
1482 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1483 pa_sink_input_state_t st;
1484
1485 st = pa_sink_input_get_state(i);
1486 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1487
1488 if (st == PA_SINK_INPUT_CORKED)
1489 continue;
1490
1491 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1492 continue;
1493
1494 ret ++;
1495 }
1496
1497 if (s->monitor_source)
1498 ret += pa_source_check_suspend(s->monitor_source);
1499
1500 return ret;
1501 }
1502
1503 /* Called from the IO thread */
1504 static void sync_input_volumes_within_thread(pa_sink *s) {
1505 pa_sink_input *i;
1506 void *state = NULL;
1507
1508 pa_sink_assert_ref(s);
1509
1510 while ((i = PA_SINK_INPUT(pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))) {
1511 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1512 continue;
1513
1514 i->thread_info.soft_volume = i->soft_volume;
1515 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1516 }
1517 }
1518
1519 /* Called from IO thread, except when it is not */
1520 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1521 pa_sink *s = PA_SINK(o);
1522 pa_sink_assert_ref(s);
1523
1524 switch ((pa_sink_message_t) code) {
1525
1526 case PA_SINK_MESSAGE_ADD_INPUT: {
1527 pa_sink_input *i = PA_SINK_INPUT(userdata);
1528
1529 /* If you change anything here, make sure to change the
1530 * sink input handling a few lines down at
1531 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1532
1533 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1534
1535 /* Since the caller sleeps in pa_sink_input_put(), we can
1536 * safely access data outside of thread_info even though
1537 * it is mutable */
1538
1539 if ((i->thread_info.sync_prev = i->sync_prev)) {
1540 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1541 pa_assert(i->sync_prev->sync_next == i);
1542 i->thread_info.sync_prev->thread_info.sync_next = i;
1543 }
1544
1545 if ((i->thread_info.sync_next = i->sync_next)) {
1546 pa_assert(i->sink == i->thread_info.sync_next->sink);
1547 pa_assert(i->sync_next->sync_prev == i);
1548 i->thread_info.sync_next->thread_info.sync_prev = i;
1549 }
1550
1551 pa_assert(!i->thread_info.attached);
1552 i->thread_info.attached = TRUE;
1553
1554 if (i->attach)
1555 i->attach(i);
1556
1557 pa_sink_input_set_state_within_thread(i, i->state);
1558
1559 /* The requested latency of the sink input needs to be
1560 * fixed up and then configured on the sink */
1561
1562 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1563 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1564
1565 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1566 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1567
1568 /* We don't rewind here automatically. This is left to the
1569 * sink input implementor because some sink inputs need a
1570 * slow start, i.e. need some time to buffer client
1571 * samples before beginning streaming. */
1572
1573 /* In flat volume mode we need to update the volume as
1574 * well */
1575 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1576 }
1577
1578 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1579 pa_sink_input *i = PA_SINK_INPUT(userdata);
1580
1581 /* If you change anything here, make sure to change the
1582 * sink input handling a few lines down at
1583 * PA_SINK_MESSAGE_START_MOVE, too. */
1584
1585 if (i->detach)
1586 i->detach(i);
1587
1588 pa_sink_input_set_state_within_thread(i, i->state);
1589
1590 pa_assert(i->thread_info.attached);
1591 i->thread_info.attached = FALSE;
1592
1593 /* Since the caller sleeps in pa_sink_input_unlink(),
1594 * we can safely access data outside of thread_info even
1595 * though it is mutable */
1596
1597 pa_assert(!i->sync_prev);
1598 pa_assert(!i->sync_next);
1599
1600 if (i->thread_info.sync_prev) {
1601 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1602 i->thread_info.sync_prev = NULL;
1603 }
1604
1605 if (i->thread_info.sync_next) {
1606 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1607 i->thread_info.sync_next = NULL;
1608 }
1609
1610 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1611 pa_sink_input_unref(i);
1612
1613 pa_sink_invalidate_requested_latency(s);
1614 pa_sink_request_rewind(s, (size_t) -1);
1615
1616 /* In flat volume mode we need to update the volume as
1617 * well */
1618 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1619 }
1620
1621 case PA_SINK_MESSAGE_START_MOVE: {
1622 pa_sink_input *i = PA_SINK_INPUT(userdata);
1623
1624 /* We don't support moving synchronized streams. */
1625 pa_assert(!i->sync_prev);
1626 pa_assert(!i->sync_next);
1627 pa_assert(!i->thread_info.sync_next);
1628 pa_assert(!i->thread_info.sync_prev);
1629
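/* The stream is still playing: ask it to rewrite everything that is
 * still buffered for this sink (the sink's own latency plus whatever
 * sits in the input's render queue), so that this data can be
 * generated again after the move. */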
1630 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1631 pa_usec_t usec = 0;
1632 size_t sink_nbytes, total_nbytes;
1633
1634 /* Get the latency of the sink */
1635 if (!(s->flags & PA_SINK_LATENCY) ||
1636 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1637 usec = 0;
1638
1639 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1640 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1641
1642 if (total_nbytes > 0) {
1643 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1644 i->thread_info.rewrite_flush = TRUE;
1645 pa_sink_input_process_rewind(i, sink_nbytes);
1646 }
1647 }
1648
1649 if (i->detach)
1650 i->detach(i);
1651
1652 pa_assert(i->thread_info.attached);
1653 i->thread_info.attached = FALSE;
1654
1655 /* Let's remove the sink input ... */
1656 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1657 pa_sink_input_unref(i);
1658
1659 pa_sink_invalidate_requested_latency(s);
1660
1661 pa_log_debug("Requesting rewind due to started move");
1662 pa_sink_request_rewind(s, (size_t) -1);
1663
1664 /* In flat volume mode we need to update the volume as
1665 * well */
1666 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1667 }
1668
1669 case PA_SINK_MESSAGE_FINISH_MOVE: {
1670 pa_sink_input *i = PA_SINK_INPUT(userdata);
1671
1672 /* We don't support moving synchronized streams. */
1673 pa_assert(!i->sync_prev);
1674 pa_assert(!i->sync_next);
1675 pa_assert(!i->thread_info.sync_next);
1676 pa_assert(!i->thread_info.sync_prev);
1677
1678 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1679
1680 pa_assert(!i->thread_info.attached);
1681 i->thread_info.attached = TRUE;
1682
1683 if (i->attach)
1684 i->attach(i);
1685
1686 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1687 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1688
1689 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1690 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1691
1692 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1693 pa_usec_t usec = 0;
1694 size_t nbytes;
1695
1696 /* Get the latency of the sink */
1697 if (!(s->flags & PA_SINK_LATENCY) ||
1698 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1699 usec = 0;
1700
1701 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1702
1703 if (nbytes > 0)
1704 pa_sink_input_drop(i, nbytes);
1705
1706 pa_log_debug("Requesting rewind due to finished move");
1707 pa_sink_request_rewind(s, nbytes);
1708 }
1709
1710 /* In flat volume mode we need to update the volume as
1711 * well */
1712 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1713 }
1714
1715 case PA_SINK_MESSAGE_SET_VOLUME:
1716
1717 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1718 s->thread_info.soft_volume = s->soft_volume;
1719 pa_sink_request_rewind(s, (size_t) -1);
1720 }
1721
1722 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1723 return 0;
1724
1725 /* Fall through ... */
1726
1727 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1728 sync_input_volumes_within_thread(s);
1729 return 0;
1730
1731 case PA_SINK_MESSAGE_GET_VOLUME:
1732 return 0;
1733
1734 case PA_SINK_MESSAGE_SET_MUTE:
1735
1736 if (s->thread_info.soft_muted != s->muted) {
1737 s->thread_info.soft_muted = s->muted;
1738 pa_sink_request_rewind(s, (size_t) -1);
1739 }
1740
1741 return 0;
1742
1743 case PA_SINK_MESSAGE_GET_MUTE:
1744 return 0;
1745
1746 case PA_SINK_MESSAGE_SET_STATE: {
1747
1748 pa_bool_t suspend_change =
1749 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1750 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1751
1752 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1753
1754 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1755 s->thread_info.rewind_nbytes = 0;
1756 s->thread_info.rewind_requested = FALSE;
1757 }
1758
1759 if (suspend_change) {
1760 pa_sink_input *i;
1761 void *state = NULL;
1762
1763 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1764 if (i->suspend_within_thread)
1765 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1766 }
1767
1768 return 0;
1769 }
1770
1771 case PA_SINK_MESSAGE_DETACH:
1772
1773 /* Detach all streams */
1774 pa_sink_detach_within_thread(s);
1775 return 0;
1776
1777 case PA_SINK_MESSAGE_ATTACH:
1778
1779 /* Reattach all streams */
1780 pa_sink_attach_within_thread(s);
1781 return 0;
1782
1783 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
1784
1785 pa_usec_t *usec = userdata;
1786 *usec = pa_sink_get_requested_latency_within_thread(s);
1787
1788 if (*usec == (pa_usec_t) -1)
1789 *usec = s->thread_info.max_latency;
1790
1791 return 0;
1792 }
1793
1794 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
1795 pa_usec_t *r = userdata;
1796
1797 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
1798
1799 return 0;
1800 }
1801
1802 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
1803 pa_usec_t *r = userdata;
1804
1805 r[0] = s->thread_info.min_latency;
1806 r[1] = s->thread_info.max_latency;
1807
1808 return 0;
1809 }
1810
1811 case PA_SINK_MESSAGE_GET_MAX_REWIND:
1812
1813 *((size_t*) userdata) = s->thread_info.max_rewind;
1814 return 0;
1815
1816 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
1817
1818 *((size_t*) userdata) = s->thread_info.max_request;
1819 return 0;
1820
1821 case PA_SINK_MESSAGE_SET_MAX_REWIND:
1822
1823 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
1824 return 0;
1825
1826 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
1827
1828 pa_sink_set_max_request_within_thread(s, (size_t) offset);
1829 return 0;
1830
1831 case PA_SINK_MESSAGE_GET_LATENCY:
1832 case PA_SINK_MESSAGE_MAX:
1833 ;
1834 }
1835
1836 return -1;
1837 }
1838
1839 /* Called from main thread */
1840 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
1841 pa_sink *sink;
1842 uint32_t idx;
1843 int ret = 0;
1844
1845 pa_core_assert_ref(c);
1846 pa_assert(cause != 0);
1847
1848 for (sink = PA_SINK(pa_idxset_first(c->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(c->sinks, &idx))) {
1849 int r;
1850
1851 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
1852 ret = r;
1853 }
1854
1855 return ret;
1856 }
1857
1858 /* Called from main thread */
1859 void pa_sink_detach(pa_sink *s) {
1860 pa_sink_assert_ref(s);
1861 pa_assert(PA_SINK_IS_LINKED(s->state));
1862
1863 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
1864 }
1865
1866 /* Called from main thread */
1867 void pa_sink_attach(pa_sink *s) {
1868 pa_sink_assert_ref(s);
1869 pa_assert(PA_SINK_IS_LINKED(s->state));
1870
1871 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
1872 }
1873
1874 /* Called from IO thread */
1875 void pa_sink_detach_within_thread(pa_sink *s) {
1876 pa_sink_input *i;
1877 void *state = NULL;
1878
1879 pa_sink_assert_ref(s);
1880 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1881
1882 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1883 if (i->detach)
1884 i->detach(i);
1885
1886 if (s->monitor_source)
1887 pa_source_detach_within_thread(s->monitor_source);
1888 }
1889
1890 /* Called from IO thread */
1891 void pa_sink_attach_within_thread(pa_sink *s) {
1892 pa_sink_input *i;
1893 void *state = NULL;
1894
1895 pa_sink_assert_ref(s);
1896 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1897
1898 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1899 if (i->attach)
1900 i->attach(i);
1901
1902 if (s->monitor_source)
1903 pa_source_attach_within_thread(s->monitor_source);
1904 }
1905
1906 /* Called from IO thread */
1907 void pa_sink_request_rewind(pa_sink *s, size_t nbytes) {
1908 pa_sink_assert_ref(s);
1909 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1910
1911 if (s->thread_info.state == PA_SINK_SUSPENDED)
1912 return;
1913
1914 if (nbytes == (size_t) -1)
1915 nbytes = s->thread_info.max_rewind;
1916
1917 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
1918
1919 if (s->thread_info.rewind_requested &&
1920 nbytes <= s->thread_info.rewind_nbytes)
1921 return;
1922
1923 s->thread_info.rewind_nbytes = nbytes;
1924 s->thread_info.rewind_requested = TRUE;
1925
1926 if (s->request_rewind)
1927 s->request_rewind(s);
1928 }
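/*
 * Usage sketch: a driver whose buffer attributes just changed may ask,
 * from its IO thread, for the rendered data to be rewound as far as
 * possible; (size_t) -1 is translated to max_rewind above. "u->sink" is
 * a hypothetical driver-side pointer to this sink.
 *
 *   pa_sink_request_rewind(u->sink, (size_t) -1);
 */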
1929
1930 /* Called from IO thread */
1931 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
1932 pa_usec_t result = (pa_usec_t) -1;
1933 pa_sink_input *i;
1934 void *state = NULL;
1935 pa_usec_t monitor_latency;
1936
1937 pa_sink_assert_ref(s);
1938
1939 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
1940 return PA_CLAMP(s->fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
1941
1942 if (s->thread_info.requested_latency_valid)
1943 return s->thread_info.requested_latency;
1944
1945 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1946
1947 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
1948 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
1949 result = i->thread_info.requested_sink_latency;
1950
1951 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
1952
1953 if (monitor_latency != (pa_usec_t) -1 &&
1954 (result == (pa_usec_t) -1 || result > monitor_latency))
1955 result = monitor_latency;
1956
1957 if (result != (pa_usec_t) -1)
1958 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
1959
1960 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1961 /* Only cache if properly initialized */
1962 s->thread_info.requested_latency = result;
1963 s->thread_info.requested_latency_valid = TRUE;
1964 }
1965
1966 return result;
1967 }
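/*
 * A minimal sketch of how a timer-driven sink typically consumes this:
 * its update_requested_latency callback re-derives the block size it
 * schedules with. "struct userdata" and "u->block_usec" are hypothetical
 * driver-side names.
 *
 *   static void sink_update_requested_latency_cb(pa_sink *s) {
 *       struct userdata *u = s->userdata;
 *
 *       u->block_usec = pa_sink_get_requested_latency_within_thread(s);
 *       if (u->block_usec == (pa_usec_t) -1)
 *           u->block_usec = s->thread_info.max_latency;
 *   }
 */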
1968
1969 /* Called from main thread */
1970 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
1971 pa_usec_t usec = 0;
1972
1973 pa_sink_assert_ref(s);
1974 pa_assert(PA_SINK_IS_LINKED(s->state));
1975
1976 if (s->state == PA_SINK_SUSPENDED)
1977 return 0;
1978
1979 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
1980 return usec;
1981 }
1982
1983 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
1984 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
1985 pa_sink_input *i;
1986 void *state = NULL;
1987
1988 pa_sink_assert_ref(s);
1989
1990 if (max_rewind == s->thread_info.max_rewind)
1991 return;
1992
1993 s->thread_info.max_rewind = max_rewind;
1994
1995 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1996 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1997 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1998 }
1999
2000 if (s->monitor_source)
2001 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2002 }
2003
2004 /* Called from main thread */
2005 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2006 pa_sink_assert_ref(s);
2007
2008 if (PA_SINK_IS_LINKED(s->state))
2009 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2010 else
2011 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2012 }
2013
2014 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2015 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2016 void *state = NULL;
2017
2018 pa_sink_assert_ref(s);
2019
2020 if (max_request == s->thread_info.max_request)
2021 return;
2022
2023 s->thread_info.max_request = max_request;
2024
2025 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2026 pa_sink_input *i;
2027
2028 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2029 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2030 }
2031 }
2032
2033 /* Called from main thread */
2034 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2035 pa_sink_assert_ref(s);
2036
2037 if (PA_SINK_IS_LINKED(s->state))
2038 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2039 else
2040 pa_sink_set_max_request_within_thread(s, max_request);
2041 }
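/*
 * Usage sketch: a timer-scheduled driver usually derives max_request
 * (and often max_rewind) from the amount of audio it buffers ahead.
 * pa_usec_to_bytes() is from pulse/sample.h; "u->block_usec" is a
 * hypothetical driver-side value.
 *
 *   size_t nbytes = pa_usec_to_bytes(u->block_usec, &u->sink->sample_spec);
 *
 *   pa_sink_set_max_rewind(u->sink, nbytes);
 *   pa_sink_set_max_request(u->sink, nbytes);
 */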
2042
2043 /* Called from IO thread */
2044 void pa_sink_invalidate_requested_latency(pa_sink *s) {
2045 pa_sink_input *i;
2046 void *state = NULL;
2047
2048 pa_sink_assert_ref(s);
2049
2050 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2051 return;
2052
2053 s->thread_info.requested_latency_valid = FALSE;
2054
2055 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2056
2057 if (s->update_requested_latency)
2058 s->update_requested_latency(s);
2059
2060 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2061 if (i->update_sink_requested_latency)
2062 i->update_sink_requested_latency(i);
2063 }
2064 }
2065
2066 /* Called from main thread */
2067 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2068 pa_sink_assert_ref(s);
2069
2070 /* min_latency == 0: no limit
2071 * min_latency anything else: specified limit
2072 *
2073 * Similar for max_latency */
2074
2075 if (min_latency < ABSOLUTE_MIN_LATENCY)
2076 min_latency = ABSOLUTE_MIN_LATENCY;
2077
2078 if (max_latency <= 0 ||
2079 max_latency > ABSOLUTE_MAX_LATENCY)
2080 max_latency = ABSOLUTE_MAX_LATENCY;
2081
2082 pa_assert(min_latency <= max_latency);
2083
2084 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2085 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2086 max_latency == ABSOLUTE_MAX_LATENCY) ||
2087 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2088
2089 if (PA_SINK_IS_LINKED(s->state)) {
2090 pa_usec_t r[2];
2091
2092 r[0] = min_latency;
2093 r[1] = max_latency;
2094
2095 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2096 } else
2097 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2098 }
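/*
 * For illustration: a sink that sets PA_SINK_DYNAMIC_LATENCY announces
 * the range it can honour, typically before pa_sink_put(); passing 0 for
 * either bound selects the absolute limit, as described above.
 * MAX_LATENCY_USEC is a hypothetical driver-side constant.
 *
 *   pa_sink_set_latency_range(u->sink, 0, MAX_LATENCY_USEC);
 */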
2099
2100 /* Called from main thread */
2101 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2102 pa_sink_assert_ref(s);
2103 pa_assert(min_latency);
2104 pa_assert(max_latency);
2105
2106 if (PA_SINK_IS_LINKED(s->state)) {
2107 pa_usec_t r[2] = { 0, 0 };
2108
2109 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2110
2111 *min_latency = r[0];
2112 *max_latency = r[1];
2113 } else {
2114 *min_latency = s->thread_info.min_latency;
2115 *max_latency = s->thread_info.max_latency;
2116 }
2117 }
2118
2119 /* Called from IO thread */
2120 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2121 void *state = NULL;
2122
2123 pa_sink_assert_ref(s);
2124
2125 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2126 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2127 pa_assert(min_latency <= max_latency);
2128
2129 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2130 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2131 max_latency == ABSOLUTE_MAX_LATENCY) ||
2132 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2133
2134 s->thread_info.min_latency = min_latency;
2135 s->thread_info.max_latency = max_latency;
2136
2137 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2138 pa_sink_input *i;
2139
2140 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2141 if (i->update_sink_latency_range)
2142 i->update_sink_latency_range(i);
2143 }
2144
2145 pa_sink_invalidate_requested_latency(s);
2146
2147 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2148 }
2149
2150 /* Called from main thread, before the sink is put */
2151 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2152 pa_sink_assert_ref(s);
2153
2154 pa_assert(pa_sink_get_state(s) == PA_SINK_INIT);
2155
2156 if (latency < ABSOLUTE_MIN_LATENCY)
2157 latency = ABSOLUTE_MIN_LATENCY;
2158
2159 if (latency > ABSOLUTE_MAX_LATENCY)
2160 latency = ABSOLUTE_MAX_LATENCY;
2161
2162 s->fixed_latency = latency;
2163 pa_source_set_fixed_latency(s->monitor_source, latency);
2164 }
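/*
 * Usage sketch: a sink without PA_SINK_DYNAMIC_LATENCY reports the one
 * latency it will always run at, typically derived from its hardware
 * buffer size. pa_bytes_to_usec() is from pulse/sample.h;
 * "u->buffer_size" is a hypothetical driver-side value.
 *
 *   pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->buffer_size, &u->sink->sample_spec));
 */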
2165
2166 /* Called from main context */
2167 size_t pa_sink_get_max_rewind(pa_sink *s) {
2168 size_t r;
2169 pa_sink_assert_ref(s);
2170
2171 if (!PA_SINK_IS_LINKED(s->state))
2172 return s->thread_info.max_rewind;
2173
2174 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2175
2176 return r;
2177 }
2178
2179 /* Called from main context */
2180 size_t pa_sink_get_max_request(pa_sink *s) {
2181 size_t r;
2182 pa_sink_assert_ref(s);
2183
2184 if (!PA_SINK_IS_LINKED(s->state))
2185 return s->thread_info.max_request;
2186
2187 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2188
2189 return r;
2190 }
2191
2192 /* Called from main context */
2193 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2194 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2195
2196 pa_assert(p);
2197
2198 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2199 return TRUE;
2200
2201 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2202
2203 if (pa_streq(ff, "microphone"))
2204 t = "audio-input-microphone";
2205 else if (pa_streq(ff, "webcam"))
2206 t = "camera-web";
2207 else if (pa_streq(ff, "computer"))
2208 t = "computer";
2209 else if (pa_streq(ff, "handset"))
2210 t = "phone";
2211 else if (pa_streq(ff, "portable"))
2212 t = "multimedia-player";
2213 else if (pa_streq(ff, "tv"))
2214 t = "video-display";
2215
2216 /*
2217 * The following icons are not part of the icon naming spec;
2218 * a proposal to add them was not accepted upstream, see:
2219 *
2220 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2221 */
2222 else if (pa_streq(ff, "headset"))
2223 t = "audio-headset";
2224 else if (pa_streq(ff, "headphone"))
2225 t = "audio-headphones";
2226 else if (pa_streq(ff, "speaker"))
2227 t = "audio-speakers";
2228 else if (pa_streq(ff, "hands-free"))
2229 t = "audio-handsfree";
2230 }
2231
2232 if (!t)
2233 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2234 if (pa_streq(c, "modem"))
2235 t = "modem";
2236
2237 if (!t) {
2238 if (is_sink)
2239 t = "audio-card";
2240 else
2241 t = "audio-input-microphone";
2242 }
2243
2244 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2245 if (strstr(profile, "analog"))
2246 s = "-analog";
2247 else if (strstr(profile, "iec958"))
2248 s = "-iec958";
2249 else if (strstr(profile, "hdmi"))
2250 s = "-hdmi";
2251 }
2252
2253 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2254
2255 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2256
2257 return TRUE;
2258 }
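/*
 * A usage sketch: a device module fills in the form factor (and
 * optionally profile and bus) on the new-data proplist and lets this
 * helper pick the icon; with the values below the result would be
 * "audio-headset-usb".
 *
 *   pa_proplist_sets(data.proplist, PA_PROP_DEVICE_FORM_FACTOR, "headset");
 *   pa_proplist_sets(data.proplist, PA_PROP_DEVICE_BUS, "usb");
 *   pa_device_init_icon(data.proplist, TRUE);
 */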
2259
2260 pa_bool_t pa_device_init_description(pa_proplist *p) {
2261 const char *s;
2262 pa_assert(p);
2263
2264 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2265 return TRUE;
2266
2267 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2268 if (pa_streq(s, "internal")) {
2269 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Internal Audio"));
2270 return TRUE;
2271 }
2272
2273 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2274 if (pa_streq(s, "modem")) {
2275 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Modem"));
2276 return TRUE;
2277 }
2278
2279 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME))) {
2280 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, s);
2281 return TRUE;
2282 }
2283
2284 return FALSE;
2285 }
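/*
 * For illustration: if neither the form factor nor the device class
 * matches, the product name (if set) becomes the user-visible
 * PA_PROP_DEVICE_DESCRIPTION. "Intel HDA" is just an example string.
 *
 *   pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PRODUCT_NAME, "Intel HDA");
 *   pa_device_init_description(data.proplist);
 */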