[pulseaudio] / src / pulsecore / sink.c
core: suppress suspending/resume when we are already in the right state
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
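/* Note: the three latency limits above are expressed in microseconds
 * (pa_usec_t): 0.5 ms absolute minimum, 10 s absolute maximum and a
 * 250 ms default for sinks without dynamic latency. MAX_MIX_CHANNELS
 * caps how many sink inputs are mixed in a single render pass. */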
54
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 memset(data, 0, sizeof(*data));
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_done(pa_sink_new_data *data) {
104 pa_assert(data);
105
106 pa_xfree(data->name);
107 pa_proplist_free(data->proplist);
108 }
109
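/* Rough sketch of how a sink implementor typically uses this API; the
 * names m, ss, map, thread_mq and rtpoll are the module's own and only
 * illustrative here:
 *
 *     pa_sink_new_data data;
 *     pa_sink *s;
 *
 *     pa_sink_new_data_init(&data);
 *     data.driver = __FILE__;
 *     data.module = m;
 *     pa_sink_new_data_set_name(&data, "my_sink");
 *     pa_sink_new_data_set_sample_spec(&data, &ss);
 *     pa_sink_new_data_set_channel_map(&data, &map);
 *
 *     s = pa_sink_new(m->core, &data, PA_SINK_LATENCY);
 *     pa_sink_new_data_done(&data);
 *
 *     pa_sink_set_asyncmsgq(s, thread_mq.inq);
 *     pa_sink_set_rtpoll(s, rtpoll);
 *     pa_sink_put(s);
 */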
110 /* Called from main context */
111 static void reset_callbacks(pa_sink *s) {
112 pa_assert(s);
113
114 s->set_state = NULL;
115 s->get_volume = NULL;
116 s->set_volume = NULL;
117 s->get_mute = NULL;
118 s->set_mute = NULL;
119 s->request_rewind = NULL;
120 s->update_requested_latency = NULL;
121 }
122
123 /* Called from main context */
124 pa_sink* pa_sink_new(
125 pa_core *core,
126 pa_sink_new_data *data,
127 pa_sink_flags_t flags) {
128
129 pa_sink *s;
130 const char *name;
131 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
132 pa_source_new_data source_data;
133 const char *dn;
134 char *pt;
135
136 pa_assert(core);
137 pa_assert(data);
138 pa_assert(data->name);
139
140 s = pa_msgobject_new(pa_sink);
141
142 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
143 pa_xfree(s);
144 return NULL;
145 }
146
147 pa_sink_new_data_set_name(data, name);
148
149 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
150 pa_xfree(s);
151 pa_namereg_unregister(core, name);
152 return NULL;
153 }
154
155 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
156 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
157
158 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
159
160 if (!data->channel_map_is_set)
161 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
162
163 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
164 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
165
166 if (!data->volume_is_set)
167 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
168
169 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
170 pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);
171
172 if (!data->muted_is_set)
173 data->muted = FALSE;
174
175 if (data->card)
176 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
177
178 pa_device_init_description(data->proplist);
179 pa_device_init_icon(data->proplist, TRUE);
180
181 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
182 pa_xfree(s);
183 pa_namereg_unregister(core, name);
184 return NULL;
185 }
186
187 s->parent.parent.free = sink_free;
188 s->parent.process_msg = pa_sink_process_msg;
189
190 s->core = core;
191 s->state = PA_SINK_INIT;
192 s->flags = flags;
193 s->suspend_cause = 0;
194 s->name = pa_xstrdup(name);
195 s->proplist = pa_proplist_copy(data->proplist);
196 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
197 s->module = data->module;
198 s->card = data->card;
199
200 s->sample_spec = data->sample_spec;
201 s->channel_map = data->channel_map;
202
203 s->inputs = pa_idxset_new(NULL, NULL);
204 s->n_corked = 0;
205
206 s->reference_volume = s->virtual_volume = data->volume;
207 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
208 s->base_volume = PA_VOLUME_NORM;
209 s->n_volume_steps = PA_VOLUME_NORM+1;
210 s->muted = data->muted;
211 s->refresh_volume = s->refresh_muted = FALSE;
212
213 s->fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
214
215 reset_callbacks(s);
216 s->userdata = NULL;
217
218 s->asyncmsgq = NULL;
219 s->rtpoll = NULL;
220
221 pa_silence_memchunk_get(
222 &core->silence_cache,
223 core->mempool,
224 &s->silence,
225 &s->sample_spec,
226 0);
227
228 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
229 s->thread_info.soft_volume = s->soft_volume;
230 s->thread_info.soft_muted = s->muted;
231 s->thread_info.state = s->state;
232 s->thread_info.rewind_nbytes = 0;
233 s->thread_info.rewind_requested = FALSE;
234 s->thread_info.max_rewind = 0;
235 s->thread_info.max_request = 0;
236 s->thread_info.requested_latency_valid = FALSE;
237 s->thread_info.requested_latency = 0;
238 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
239 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
240
241 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
242
243 if (s->card)
244 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
245
246 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
247 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
248 s->index,
249 s->name,
250 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
251 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
252 pt);
253 pa_xfree(pt);
254
255 pa_source_new_data_init(&source_data);
256 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
257 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
258 source_data.name = pa_sprintf_malloc("%s.monitor", name);
259 source_data.driver = data->driver;
260 source_data.module = data->module;
261 source_data.card = data->card;
262
263 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
264 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
265 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
266
267 s->monitor_source = pa_source_new(core, &source_data,
268 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
269 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
270
271 pa_source_new_data_done(&source_data);
272
273 if (!s->monitor_source) {
274 pa_sink_unlink(s);
275 pa_sink_unref(s);
276 return NULL;
277 }
278
279 s->monitor_source->monitor_of = s;
280
281 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
282 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
283
284 return s;
285 }
286
287 /* Called from main context */
288 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
289 int ret;
290 pa_bool_t suspend_change;
291 pa_sink_state_t original_state;
292
293 pa_assert(s);
294
295 if (s->state == state)
296 return 0;
297
298 original_state = s->state;
299
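/* Only treat this as a suspend/resume if we actually cross the boundary
 * between SUSPENDED and an opened state (IDLE or RUNNING);
 * IDLE<->RUNNING transitions are not announced to the inputs below. */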
300 suspend_change =
301 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
302 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
303
304 if (s->set_state)
305 if ((ret = s->set_state(s, state)) < 0)
306 return ret;
307
308 if (s->asyncmsgq)
309 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
310
311 if (s->set_state)
312 s->set_state(s, original_state);
313
314 return ret;
315 }
316
317 s->state = state;
318
319 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
320 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
321 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
322 }
323
324 if (suspend_change) {
325 pa_sink_input *i;
326 uint32_t idx;
327
328 /* We're suspending or resuming, tell everyone about it */
329
330 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx)))
331 if (s->state == PA_SINK_SUSPENDED &&
332 (i->flags & PA_SINK_INPUT_FAIL_ON_SUSPEND))
333 pa_sink_input_kill(i);
334 else if (i->suspend)
335 i->suspend(i, state == PA_SINK_SUSPENDED);
336
337 if (s->monitor_source)
338 pa_source_sync_suspend(s->monitor_source);
339 }
340
341 return 0;
342 }
343
344 /* Called from main context */
345 void pa_sink_put(pa_sink* s) {
346 pa_sink_assert_ref(s);
347
348 pa_assert(s->state == PA_SINK_INIT);
349
350 /* The following fields must be initialized properly when calling _put() */
351 pa_assert(s->asyncmsgq);
352 pa_assert(s->rtpoll);
353 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
354
355 /* Generally, flags should be initialized via pa_sink_new(). As a
356 * special exception we allow volume related flags to be set
357 * between _new() and _put(). */
358
359 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
360 s->flags |= PA_SINK_DECIBEL_VOLUME;
361
362 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
363 s->flags |= PA_SINK_FLAT_VOLUME;
364
365 s->thread_info.soft_volume = s->soft_volume;
366 s->thread_info.soft_muted = s->muted;
367
368 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
369 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
370 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->fixed_latency != 0));
371 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
372 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
373
374 pa_assert(s->monitor_source->fixed_latency == s->fixed_latency);
375 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
376 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
377
378 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
379
380 pa_source_put(s->monitor_source);
381
382 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
383 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
384 }
385
386 /* Called from main context */
387 void pa_sink_unlink(pa_sink* s) {
388 pa_bool_t linked;
389 pa_sink_input *i, *j = NULL;
390
391 pa_assert(s);
392
393 /* Please note that pa_sink_unlink() does more than simply
394 * reversing pa_sink_put(). It also undoes the registrations
395 * already done in pa_sink_new()! */
396
397 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
398 * may be called multiple times on the same sink without bad
399 * effects. */
400
401 linked = PA_SINK_IS_LINKED(s->state);
402
403 if (linked)
404 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
405
406 if (s->state != PA_SINK_UNLINKED)
407 pa_namereg_unregister(s->core, s->name);
408 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
409
410 if (s->card)
411 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
412
413 while ((i = pa_idxset_first(s->inputs, NULL))) {
414 pa_assert(i != j);
415 pa_sink_input_kill(i);
416 j = i;
417 }
418
419 if (linked)
420 sink_set_state(s, PA_SINK_UNLINKED);
421 else
422 s->state = PA_SINK_UNLINKED;
423
424 reset_callbacks(s);
425
426 if (s->monitor_source)
427 pa_source_unlink(s->monitor_source);
428
429 if (linked) {
430 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
431 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
432 }
433 }
434
435 /* Called from main context */
436 static void sink_free(pa_object *o) {
437 pa_sink *s = PA_SINK(o);
438 pa_sink_input *i;
439
440 pa_assert(s);
441 pa_assert(pa_sink_refcnt(s) == 0);
442
443 if (PA_SINK_IS_LINKED(s->state))
444 pa_sink_unlink(s);
445
446 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
447
448 if (s->monitor_source) {
449 pa_source_unref(s->monitor_source);
450 s->monitor_source = NULL;
451 }
452
453 pa_idxset_free(s->inputs, NULL, NULL);
454
455 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
456 pa_sink_input_unref(i);
457
458 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
459
460 if (s->silence.memblock)
461 pa_memblock_unref(s->silence.memblock);
462
463 pa_xfree(s->name);
464 pa_xfree(s->driver);
465
466 if (s->proplist)
467 pa_proplist_free(s->proplist);
468
469 pa_xfree(s);
470 }
471
472 /* Called from main context */
473 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
474 pa_sink_assert_ref(s);
475
476 s->asyncmsgq = q;
477
478 if (s->monitor_source)
479 pa_source_set_asyncmsgq(s->monitor_source, q);
480 }
481
482 /* Called from main context */
483 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
484 pa_sink_assert_ref(s);
485
486 s->rtpoll = p;
487 if (s->monitor_source)
488 pa_source_set_rtpoll(s->monitor_source, p);
489 }
490
491 /* Called from main context */
492 int pa_sink_update_status(pa_sink*s) {
493 pa_sink_assert_ref(s);
494 pa_assert(PA_SINK_IS_LINKED(s->state));
495
496 if (s->state == PA_SINK_SUSPENDED)
497 return 0;
498
499 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
500 }
501
502 /* Called from main context */
503 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
504 pa_sink_assert_ref(s);
505 pa_assert(PA_SINK_IS_LINKED(s->state));
506 pa_assert(cause != 0);
507
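/* The suspend causes form a bitmask; the sink state only needs to change
 * when that mask transitions between empty and non-empty, which is what
 * the short-circuit check below implements. */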
508 if (suspend)
509 s->suspend_cause |= cause;
510 else
511 s->suspend_cause &= ~cause;
512
513 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
514 return 0;
515
516 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
517
518 if (s->suspend_cause)
519 return sink_set_state(s, PA_SINK_SUSPENDED);
520 else
521 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
522 }
523
524 /* Called from main context */
525 pa_queue *pa_sink_move_all_start(pa_sink *s) {
526 pa_queue *q;
527 pa_sink_input *i, *n;
528 uint32_t idx;
529
530 pa_sink_assert_ref(s);
531 pa_assert(PA_SINK_IS_LINKED(s->state));
532
533 q = pa_queue_new();
534
535 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
536 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
537
538 pa_sink_input_ref(i);
539
540 if (pa_sink_input_start_move(i) >= 0)
541 pa_queue_push(q, i);
542 else
543 pa_sink_input_unref(i);
544 }
545
546 return q;
547 }
548
549 /* Called from main context */
550 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
551 pa_sink_input *i;
552
553 pa_sink_assert_ref(s);
554 pa_assert(PA_SINK_IS_LINKED(s->state));
555 pa_assert(q);
556
557 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
558 if (pa_sink_input_finish_move(i, s, save) < 0)
559 pa_sink_input_kill(i);
560
561 pa_sink_input_unref(i);
562 }
563
564 pa_queue_free(q, NULL, NULL);
565 }
566
567 /* Called from main context */
568 void pa_sink_move_all_fail(pa_queue *q) {
569 pa_sink_input *i;
570 pa_assert(q);
571
572 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
573 if (pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_MOVE_FAIL], i) == PA_HOOK_OK) {
574 pa_sink_input_kill(i);
575 pa_sink_input_unref(i);
576 }
577 }
578
579 pa_queue_free(q, NULL, NULL);
580 }
581
582 /* Called from IO thread context */
583 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
584 pa_sink_input *i;
585 void *state = NULL;
586 pa_sink_assert_ref(s);
587 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
588
589 /* If nobody requested this and this is not an actual rewind,
590 * then we can short-cut it */
591 if (!s->thread_info.rewind_requested && nbytes <= 0)
592 return;
593
594 s->thread_info.rewind_nbytes = 0;
595 s->thread_info.rewind_requested = FALSE;
596
597 if (s->thread_info.state == PA_SINK_SUSPENDED)
598 return;
599
600 if (nbytes > 0)
601 pa_log_debug("Processing rewind...");
602
603 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
604 pa_sink_input_assert_ref(i);
605 pa_sink_input_process_rewind(i, nbytes);
606 }
607
608 if (nbytes > 0)
609 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
610 pa_source_process_rewind(s->monitor_source, nbytes);
611 }
612
613 /* Called from IO thread context */
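/* Peek one chunk from each connected sink input (at most 'maxinfo' of
 * them), skipping inputs that only deliver silence; *length is reduced
 * to the shortest chunk peeked so all entries cover the same span.
 * Returns the number of entries written to info[]. */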
614 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
615 pa_sink_input *i;
616 unsigned n = 0;
617 void *state = NULL;
618 size_t mixlength = *length;
619
620 pa_sink_assert_ref(s);
621 pa_assert(info);
622
623 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
624 pa_sink_input_assert_ref(i);
625
626 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
627
628 if (mixlength == 0 || info->chunk.length < mixlength)
629 mixlength = info->chunk.length;
630
631 if (pa_memblock_is_silence(info->chunk.memblock)) {
632 pa_memblock_unref(info->chunk.memblock);
633 continue;
634 }
635
636 info->userdata = pa_sink_input_ref(i);
637
638 pa_assert(info->chunk.memblock);
639 pa_assert(info->chunk.length > 0);
640
641 info++;
642 n++;
643 maxinfo--;
644 }
645
646 if (mixlength > 0)
647 *length = mixlength;
648
649 return n;
650 }
651
652 /* Called from IO thread context */
653 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
654 pa_sink_input *i;
655 void *state = NULL;
656 unsigned p = 0;
657 unsigned n_unreffed = 0;
658
659 pa_sink_assert_ref(s);
660 pa_assert(result);
661 pa_assert(result->memblock);
662 pa_assert(result->length > 0);
663
664 /* We optimize for the case where the order of the inputs has not changed */
665
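/* 'p' is a cursor into info[] that is kept across iterations (wrapping
 * around); if the hashmap iterates in the same order in which
 * fill_mix_info() filled the array, each lookup below succeeds on the
 * first comparison. */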
666 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
667 unsigned j;
668 pa_mix_info* m = NULL;
669
670 pa_sink_input_assert_ref(i);
671
672 /* Let's try to find the matching entry in the pa_mix_info array */
673 for (j = 0; j < n; j ++) {
674
675 if (info[p].userdata == i) {
676 m = info + p;
677 break;
678 }
679
680 p++;
681 if (p >= n)
682 p = 0;
683 }
684
685 /* Drop read data */
686 pa_sink_input_drop(i, result->length);
687
688 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
689
690 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
691 void *ostate = NULL;
692 pa_source_output *o;
693 pa_memchunk c;
694
695 if (m && m->chunk.memblock) {
696 c = m->chunk;
697 pa_memblock_ref(c.memblock);
698 pa_assert(result->length <= c.length);
699 c.length = result->length;
700
701 pa_memchunk_make_writable(&c, 0);
702 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
703 } else {
704 c = s->silence;
705 pa_memblock_ref(c.memblock);
706 pa_assert(result->length <= c.length);
707 c.length = result->length;
708 }
709
710 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
711 pa_source_output_assert_ref(o);
712 pa_assert(o->direct_on_input == i);
713 pa_source_post_direct(s->monitor_source, o, &c);
714 }
715
716 pa_memblock_unref(c.memblock);
717 }
718 }
719
720 if (m) {
721 if (m->chunk.memblock)
722 pa_memblock_unref(m->chunk.memblock);
723 pa_memchunk_reset(&m->chunk);
724
725 pa_sink_input_unref(m->userdata);
726 m->userdata = NULL;
727
728 n_unreffed += 1;
729 }
730 }
731
732 /* Now drop references to entries that are included in the
733 * pa_mix_info array but don't exist anymore */
734
735 if (n_unreffed < n) {
736 for (; n > 0; info++, n--) {
737 if (info->userdata)
738 pa_sink_input_unref(info->userdata);
739 if (info->chunk.memblock)
740 pa_memblock_unref(info->chunk.memblock);
741 }
742 }
743
744 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
745 pa_source_post(s->monitor_source, result);
746 }
747
748 /* Called from IO thread context */
749 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
750 pa_mix_info info[MAX_MIX_CHANNELS];
751 unsigned n;
752 size_t block_size_max;
753
754 pa_sink_assert_ref(s);
755 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
756 pa_assert(pa_frame_aligned(length, &s->sample_spec));
757 pa_assert(result);
758
759 pa_sink_ref(s);
760
761 pa_assert(!s->thread_info.rewind_requested);
762 pa_assert(s->thread_info.rewind_nbytes == 0);
763
764 if (s->thread_info.state == PA_SINK_SUSPENDED) {
765 result->memblock = pa_memblock_ref(s->silence.memblock);
766 result->index = s->silence.index;
767 result->length = PA_MIN(s->silence.length, length);
pa_sink_unref(s); /* balance the pa_sink_ref() above before the early return */
768 return;
769 }
770
771 if (length <= 0)
772 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
773
774 block_size_max = pa_mempool_block_size_max(s->core->mempool);
775 if (length > block_size_max)
776 length = pa_frame_align(block_size_max, &s->sample_spec);
777
778 pa_assert(length > 0);
779
780 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
781
782 if (n == 0) {
783
784 *result = s->silence;
785 pa_memblock_ref(result->memblock);
786
787 if (result->length > length)
788 result->length = length;
789
790 } else if (n == 1) {
791 pa_cvolume volume;
792
793 *result = info[0].chunk;
794 pa_memblock_ref(result->memblock);
795
796 if (result->length > length)
797 result->length = length;
798
799 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
800
801 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
802 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
803 pa_memblock_unref(result->memblock);
804 pa_silence_memchunk_get(&s->core->silence_cache,
805 s->core->mempool,
806 result,
807 &s->sample_spec,
808 result->length);
809 } else {
810 pa_memchunk_make_writable(result, 0);
811 pa_volume_memchunk(result, &s->sample_spec, &volume);
812 }
813 }
814 } else {
815 void *ptr;
816 result->memblock = pa_memblock_new(s->core->mempool, length);
817
818 ptr = pa_memblock_acquire(result->memblock);
819 result->length = pa_mix(info, n,
820 ptr, length,
821 &s->sample_spec,
822 &s->thread_info.soft_volume,
823 s->thread_info.soft_muted);
824 pa_memblock_release(result->memblock);
825
826 result->index = 0;
827 }
828
829 inputs_drop(s, info, n, result);
830
831 pa_sink_unref(s);
832 }
833
834 /* Called from IO thread context */
835 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
836 pa_mix_info info[MAX_MIX_CHANNELS];
837 unsigned n;
838 size_t length, block_size_max;
839
840 pa_sink_assert_ref(s);
841 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
842 pa_assert(target);
843 pa_assert(target->memblock);
844 pa_assert(target->length > 0);
845 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
846
847 pa_sink_ref(s);
848
849 pa_assert(!s->thread_info.rewind_requested);
850 pa_assert(s->thread_info.rewind_nbytes == 0);
851
852 if (s->thread_info.state == PA_SINK_SUSPENDED) {
853 pa_silence_memchunk(target, &s->sample_spec);
pa_sink_unref(s); /* balance the pa_sink_ref() above before the early return */
854 return;
855 }
856
857 length = target->length;
858 block_size_max = pa_mempool_block_size_max(s->core->mempool);
859 if (length > block_size_max)
860 length = pa_frame_align(block_size_max, &s->sample_spec);
861
862 pa_assert(length > 0);
863
864 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
865
866 if (n == 0) {
867 if (target->length > length)
868 target->length = length;
869
870 pa_silence_memchunk(target, &s->sample_spec);
871 } else if (n == 1) {
872 pa_cvolume volume;
873
874 if (target->length > length)
875 target->length = length;
876
877 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
878
879 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
880 pa_silence_memchunk(target, &s->sample_spec);
881 else {
882 pa_memchunk vchunk;
883
884 vchunk = info[0].chunk;
885 pa_memblock_ref(vchunk.memblock);
886
887 if (vchunk.length > length)
888 vchunk.length = length;
889
890 if (!pa_cvolume_is_norm(&volume)) {
891 pa_memchunk_make_writable(&vchunk, 0);
892 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
893 }
894
895 pa_memchunk_memcpy(target, &vchunk);
896 pa_memblock_unref(vchunk.memblock);
897 }
898
899 } else {
900 void *ptr;
901
902 ptr = pa_memblock_acquire(target->memblock);
903
904 target->length = pa_mix(info, n,
905 (uint8_t*) ptr + target->index, length,
906 &s->sample_spec,
907 &s->thread_info.soft_volume,
908 s->thread_info.soft_muted);
909
910 pa_memblock_release(target->memblock);
911 }
912
913 inputs_drop(s, info, n, target);
914
915 pa_sink_unref(s);
916 }
917
918 /* Called from IO thread context */
919 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
920 pa_memchunk chunk;
921 size_t l, d;
922
923 pa_sink_assert_ref(s);
924 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
925 pa_assert(target);
926 pa_assert(target->memblock);
927 pa_assert(target->length > 0);
928 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
929
930 pa_sink_ref(s);
931
932 pa_assert(!s->thread_info.rewind_requested);
933 pa_assert(s->thread_info.rewind_nbytes == 0);
934
935 l = target->length;
936 d = 0;
937 while (l > 0) {
938 chunk = *target;
939 chunk.index += d;
940 chunk.length -= d;
941
942 pa_sink_render_into(s, &chunk);
943
944 d += chunk.length;
945 l -= chunk.length;
946 }
947
948 pa_sink_unref(s);
949 }
950
951 /* Called from IO thread context */
952 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
953 pa_mix_info info[MAX_MIX_CHANNELS];
954 size_t length1st = length;
955 unsigned n;
956
957 pa_sink_assert_ref(s);
958 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
959 pa_assert(length > 0);
960 pa_assert(pa_frame_aligned(length, &s->sample_spec));
961 pa_assert(result);
962
963 pa_sink_ref(s);
964
965 pa_assert(!s->thread_info.rewind_requested);
966 pa_assert(s->thread_info.rewind_nbytes == 0);
967
968 pa_assert(length > 0);
969
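/* First pass: mix as much as the connected inputs can deliver in one
 * peek (length1st may end up shorter than the requested length); any
 * remainder is rendered into the tail of the result block further down. */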
970 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
971
972 if (n == 0) {
973 pa_silence_memchunk_get(&s->core->silence_cache,
974 s->core->mempool,
975 result,
976 &s->sample_spec,
977 length1st);
978 } else if (n == 1) {
979 pa_cvolume volume;
980
981 *result = info[0].chunk;
982 pa_memblock_ref(result->memblock);
983
984 if (result->length > length)
985 result->length = length;
986
987 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
988
989 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
990 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
991 pa_memblock_unref(result->memblock);
992 pa_silence_memchunk_get(&s->core->silence_cache,
993 s->core->mempool,
994 result,
995 &s->sample_spec,
996 result->length);
997 } else {
998 pa_memchunk_make_writable(result, length);
999 pa_volume_memchunk(result, &s->sample_spec, &volume);
1000 }
1001 }
1002 } else {
1003 void *ptr;
1004
1005 result->index = 0;
1006 result->memblock = pa_memblock_new(s->core->mempool, length);
1007
1008 ptr = pa_memblock_acquire(result->memblock);
1009
1010 result->length = pa_mix(info, n,
1011 (uint8_t*) ptr + result->index, length1st,
1012 &s->sample_spec,
1013 &s->thread_info.soft_volume,
1014 s->thread_info.soft_muted);
1015
1016 pa_memblock_release(result->memblock);
1017 }
1018
1019 inputs_drop(s, info, n, result);
1020
1021 if (result->length < length) {
1022 pa_memchunk chunk;
1023 
1024 /* The first mixing pass above gave us less than the requested
1025 * 'length' bytes (the inputs could not deliver more in one
1026 * peek). Make the result block writable and render the missing
1027 * remainder into its tail so that the caller always receives
1028 * exactly 'length' bytes of audio. */
1029 pa_memchunk_make_writable(result, length);
1030 
1031 chunk.memblock = result->memblock;
1032 chunk.index = result->index + result->length;
1033 chunk.length = length - result->length;
1034 
1035 pa_sink_render_into_full(s, &chunk);
1036 
1037 result->length = length;
1038 }
1041
1042 pa_sink_unref(s);
1043 }
1044
1045 /* Called from main thread */
1046 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1047 pa_usec_t usec = 0;
1048
1049 pa_sink_assert_ref(s);
1050 pa_assert(PA_SINK_IS_LINKED(s->state));
1051
1052 /* The returned value is supposed to be in the time domain of the sound card! */
1053
1054 if (s->state == PA_SINK_SUSPENDED)
1055 return 0;
1056
1057 if (!(s->flags & PA_SINK_LATENCY))
1058 return 0;
1059
1060 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1061
1062 return usec;
1063 }
1064
1065 /* Called from IO thread */
1066 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1067 pa_usec_t usec = 0;
1068 pa_msgobject *o;
1069
1070 pa_sink_assert_ref(s);
1071 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1072
1073 /* The returned value is supposed to be in the time domain of the sound card! */
1074
1075 if (s->thread_info.state == PA_SINK_SUSPENDED)
1076 return 0;
1077
1078 if (!(s->flags & PA_SINK_LATENCY))
1079 return 0;
1080
1081 o = PA_MSGOBJECT(s);
1082
1083 /* We probably should make this a proper vtable callback instead of going through process_msg() */
1084
1085 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1086 return -1;
1087
1088 return usec;
1089 }
1090
1091 static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
1092 unsigned c;
1093
1094 pa_sink_input_assert_ref(i);
1095 pa_assert(new_volume->channels == i->sample_spec.channels);
1096
1097 /*
1098 * This basically calculates:
1099 *
1100 * i->relative_volume := i->virtual_volume / new_volume
1101 * i->soft_volume := i->relative_volume * i->volume_factor
1102 */
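/* Purely illustrative example: with a virtual volume corresponding to a
 * linear factor of 0.5 on some channel, a new sink volume of 1.0 and a
 * volume factor of 1.0, the relative volume becomes 0.5 and the soft
 * volume ends up at 0.5 as well, i.e. the attenuation relative to the
 * sink is applied in software. */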
1103
1104 /* The new sink volume passed in here must already be remapped to
1105 * the sink input's channel map! */
1106
1107 i->soft_volume.channels = i->sample_spec.channels;
1108
1109 for (c = 0; c < i->sample_spec.channels; c++)
1110
1111 if (new_volume->values[c] <= PA_VOLUME_MUTED)
1112 /* We leave i->relative_volume untouched */
1113 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1114 else {
1115 i->relative_volume[c] =
1116 pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
1117 pa_sw_volume_to_linear(new_volume->values[c]);
1118
1119 i->soft_volume.values[c] = pa_sw_volume_from_linear(
1120 i->relative_volume[c] *
1121 pa_sw_volume_to_linear(i->volume_factor.values[c]));
1122 }
1123
1124 /* Hooks have the ability to play games with i->soft_volume */
1125 pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);
1126
1127 /* We don't copy the soft_volume to the thread_info data
1128 * here. That must be done by the caller */
1129 }
1130
1131 /* Called from main thread */
1132 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1133 pa_sink_input *i;
1134 uint32_t idx;
1135
1136 pa_sink_assert_ref(s);
1137 pa_assert(new_volume);
1138 pa_assert(PA_SINK_IS_LINKED(s->state));
1139 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1140
1141 /* This is called whenever a sink input volume changes or a sink
1142 * input is added/removed and we might need to fix up the sink
1143 * volume accordingly. Please note that we don't actually update
1144 * the sink's volume here, we only return how it needs to be
1145 * updated. The caller should then call pa_sink_set_volume(). */
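/* Illustrative example: with two inputs whose remapped virtual volumes
 * correspond to linear factors 0.6 and 0.8 on a channel, the new sink
 * volume takes the larger one (0.8); compute_new_soft_volume() then
 * leaves the inputs with relative/soft volumes of 0.75 and 1.0
 * (assuming volume factors of 1.0). */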
1146
1147 if (pa_idxset_isempty(s->inputs)) {
1148 /* In the special case that we have no sink input we leave the
1149 * volume unmodified. */
1150 *new_volume = s->reference_volume;
1151 return;
1152 }
1153
1154 pa_cvolume_mute(new_volume, s->channel_map.channels);
1155
1156 /* First let's determine the new maximum volume of all inputs
1157 * connected to this sink */
1158 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1159 unsigned c;
1160 pa_cvolume remapped_volume;
1161
1162 remapped_volume = i->virtual_volume;
1163 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1164
1165 for (c = 0; c < new_volume->channels; c++)
1166 if (remapped_volume.values[c] > new_volume->values[c])
1167 new_volume->values[c] = remapped_volume.values[c];
1168 }
1169
1170 /* Then, let's update the soft volumes of all inputs connected
1171 * to this sink */
1172 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1173 pa_cvolume remapped_new_volume;
1174
1175 remapped_new_volume = *new_volume;
1176 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1177 compute_new_soft_volume(i, &remapped_new_volume);
1178
1179 /* We don't copy soft_volume to the thread_info data here
1180 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1181 * want the update to happen atomically with the sink volume
1182 * update, hence we do it within the pa_sink_set_volume() call
1183 * below */
1184 }
1185 }
1186
1187 /* Called from main thread */
1188 void pa_sink_propagate_flat_volume(pa_sink *s) {
1189 pa_sink_input *i;
1190 uint32_t idx;
1191
1192 pa_sink_assert_ref(s);
1193 pa_assert(PA_SINK_IS_LINKED(s->state));
1194 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1195
1196 /* This is called whenever the sink volume changes in a way that
1197 * is not caused by a sink input volume change. We need to fix up
1198 * the sink input volumes accordingly */
1199
1200 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1201 pa_cvolume sink_volume, new_virtual_volume;
1202 unsigned c;
1203
1204 /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */
1205
1206 sink_volume = s->virtual_volume;
1207 pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);
1208
1209 for (c = 0; c < i->sample_spec.channels; c++)
1210 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1211 i->relative_volume[c] *
1212 pa_sw_volume_to_linear(sink_volume.values[c]));
1213
1214 new_virtual_volume.channels = i->sample_spec.channels;
1215
1216 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1217 i->virtual_volume = new_virtual_volume;
1218
1219 /* Hmm, the soft volume might no longer actually match
1220 * what has been chosen as new virtual volume here,
1221 * especially when the old volume was
1222 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1223 * volumes here. */
1224 compute_new_soft_volume(i, &sink_volume);
1225
1226 /* The virtual volume changed, let's tell people so */
1227 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1228 }
1229 }
1230
1231 /* If the soft_volume of any of the sink inputs got changed, let's
1232 * make sure the thread copies are synced up. */
1233 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
1234 }
1235
1236 /* Called from main thread */
1237 void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference) {
1238 pa_bool_t virtual_volume_changed;
1239
1240 pa_sink_assert_ref(s);
1241 pa_assert(PA_SINK_IS_LINKED(s->state));
1242 pa_assert(volume);
1243 pa_assert(pa_cvolume_valid(volume));
1244 pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));
1245
1246 virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
1247 s->virtual_volume = *volume;
1248
1249 if (become_reference)
1250 s->reference_volume = s->virtual_volume;
1251
1252 /* Propagate this volume change back to the inputs */
1253 if (virtual_volume_changed)
1254 if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
1255 pa_sink_propagate_flat_volume(s);
1256
1257 if (s->set_volume) {
1258 /* If we have a function set_volume(), then we do not apply a
1259 * soft volume by default. However, set_volume() is free to
1260 * apply one to s->soft_volume */
1261
1262 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1263 s->set_volume(s);
1264
1265 } else
1266 /* If we have no function set_volume(), then the soft volume
1267 * becomes the virtual volume */
1268 s->soft_volume = s->virtual_volume;
1269
1270 /* This tells the sink that soft and/or virtual volume changed */
1271 if (sendmsg)
1272 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1273
1274 if (virtual_volume_changed)
1275 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1276 }
1277
1278 /* Called from main thread. Only to be called by sink implementor */
1279 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1280 pa_sink_assert_ref(s);
1281 pa_assert(volume);
1282
1283 s->soft_volume = *volume;
1284
1285 if (PA_SINK_IS_LINKED(s->state))
1286 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1287 else
1288 s->thread_info.soft_volume = *volume;
1289 }
1290
1291 /* Called from main thread */
1292 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
1293 pa_sink_assert_ref(s);
1294
1295 if (s->refresh_volume || force_refresh) {
1296 struct pa_cvolume old_virtual_volume = s->virtual_volume;
1297
1298 if (s->get_volume)
1299 s->get_volume(s);
1300
1301 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1302
1303 if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {
1304
1305 s->reference_volume = s->virtual_volume;
1306
1307 if (s->flags & PA_SINK_FLAT_VOLUME)
1308 pa_sink_propagate_flat_volume(s);
1309
1310 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1311 }
1312 }
1313
1314 return reference ? &s->reference_volume : &s->virtual_volume;
1315 }
1316
1317 /* Called from main thread */
1318 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume) {
1319 pa_sink_assert_ref(s);
1320
1321 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1322
1323 if (pa_cvolume_equal(&s->virtual_volume, new_volume))
1324 return;
1325
1326 s->reference_volume = s->virtual_volume = *new_volume;
1327
1328 if (s->flags & PA_SINK_FLAT_VOLUME)
1329 pa_sink_propagate_flat_volume(s);
1330
1331 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1332 }
1333
1334 /* Called from main thread */
1335 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute) {
1336 pa_bool_t old_muted;
1337
1338 pa_sink_assert_ref(s);
1339 pa_assert(PA_SINK_IS_LINKED(s->state));
1340
1341 old_muted = s->muted;
1342 s->muted = mute;
1343
1344 if (s->set_mute)
1345 s->set_mute(s);
1346
1347 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1348
1349 if (old_muted != s->muted)
1350 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1351 }
1352
1353 /* Called from main thread */
1354 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1355
1356 pa_sink_assert_ref(s);
1357
1358 if (s->refresh_muted || force_refresh) {
1359 pa_bool_t old_muted = s->muted;
1360
1361 if (s->get_mute)
1362 s->get_mute(s);
1363
1364 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1365
1366 if (old_muted != s->muted)
1367 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1368 }
1369
1370 return s->muted;
1371 }
1372
1373 /* Called from main thread */
1374 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1375 pa_sink_assert_ref(s);
1376
1377 /* The sink implementor may call this if the mute status changed to make sure everyone is notified */
1378
1379 if (s->muted == new_muted)
1380 return;
1381
1382 s->muted = new_muted;
1383 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1384 }
1385
1386 /* Called from main thread */
1387 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1388 pa_sink_assert_ref(s);
1389
1390 if (p)
1391 pa_proplist_update(s->proplist, mode, p);
1392
1393 if (PA_SINK_IS_LINKED(s->state)) {
1394 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1395 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1396 }
1397
1398 return TRUE;
1399 }
1400
1401 /* Called from main thread */
1402 void pa_sink_set_description(pa_sink *s, const char *description) {
1403 const char *old;
1404 pa_sink_assert_ref(s);
1405
1406 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1407 return;
1408
1409 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1410
1411 if (old && description && !strcmp(old, description))
1412 return;
1413
1414 if (description)
1415 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1416 else
1417 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1418
1419 if (s->monitor_source) {
1420 char *n;
1421
1422 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1423 pa_source_set_description(s->monitor_source, n);
1424 pa_xfree(n);
1425 }
1426
1427 if (PA_SINK_IS_LINKED(s->state)) {
1428 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1429 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1430 }
1431 }
1432
1433 /* Called from main thread */
1434 unsigned pa_sink_linked_by(pa_sink *s) {
1435 unsigned ret;
1436
1437 pa_sink_assert_ref(s);
1438 pa_assert(PA_SINK_IS_LINKED(s->state));
1439
1440 ret = pa_idxset_size(s->inputs);
1441
1442 /* We add in the number of streams connected to us here. Please
1443 * note the asymmetry to pa_sink_used_by()! */
1444
1445 if (s->monitor_source)
1446 ret += pa_source_linked_by(s->monitor_source);
1447
1448 return ret;
1449 }
1450
1451 /* Called from main thread */
1452 unsigned pa_sink_used_by(pa_sink *s) {
1453 unsigned ret;
1454
1455 pa_sink_assert_ref(s);
1456 pa_assert(PA_SINK_IS_LINKED(s->state));
1457
1458 ret = pa_idxset_size(s->inputs);
1459 pa_assert(ret >= s->n_corked);
1460
1461 /* Streams connected to our monitor source do not matter for
1462 * pa_sink_used_by()! */
1463
1464 return ret - s->n_corked;
1465 }
1466
1467 /* Called from main thread */
1468 unsigned pa_sink_check_suspend(pa_sink *s) {
1469 unsigned ret;
1470 pa_sink_input *i;
1471 uint32_t idx;
1472
1473 pa_sink_assert_ref(s);
1474
1475 if (!PA_SINK_IS_LINKED(s->state))
1476 return 0;
1477
1478 ret = 0;
1479
1480 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1481 pa_sink_input_state_t st;
1482
1483 st = pa_sink_input_get_state(i);
1484 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1485
1486 if (st == PA_SINK_INPUT_CORKED)
1487 continue;
1488
1489 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1490 continue;
1491
1492 ret ++;
1493 }
1494
1495 if (s->monitor_source)
1496 ret += pa_source_check_suspend(s->monitor_source);
1497
1498 return ret;
1499 }
1500
1501 /* Called from the IO thread */
1502 static void sync_input_volumes_within_thread(pa_sink *s) {
1503 pa_sink_input *i;
1504 void *state = NULL;
1505
1506 pa_sink_assert_ref(s);
1507
1508 while ((i = PA_SINK_INPUT(pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))) {
1509 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1510 continue;
1511
1512 i->thread_info.soft_volume = i->soft_volume;
1513 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1514 }
1515 }
1516
1517 /* Called from IO thread, except when it is not */
1518 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1519 pa_sink *s = PA_SINK(o);
1520 pa_sink_assert_ref(s);
1521
1522 switch ((pa_sink_message_t) code) {
1523
1524 case PA_SINK_MESSAGE_ADD_INPUT: {
1525 pa_sink_input *i = PA_SINK_INPUT(userdata);
1526
1527 /* If you change anything here, make sure to change the
1528 * sink input handling a few lines down at
1529 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1530
1531 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1532
1533 /* Since the caller sleeps in pa_sink_input_put(), we can
1534 * safely access data outside of thread_info even though
1535 * it is mutable */
1536
1537 if ((i->thread_info.sync_prev = i->sync_prev)) {
1538 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1539 pa_assert(i->sync_prev->sync_next == i);
1540 i->thread_info.sync_prev->thread_info.sync_next = i;
1541 }
1542
1543 if ((i->thread_info.sync_next = i->sync_next)) {
1544 pa_assert(i->sink == i->thread_info.sync_next->sink);
1545 pa_assert(i->sync_next->sync_prev == i);
1546 i->thread_info.sync_next->thread_info.sync_prev = i;
1547 }
1548
1549 pa_assert(!i->thread_info.attached);
1550 i->thread_info.attached = TRUE;
1551
1552 if (i->attach)
1553 i->attach(i);
1554
1555 pa_sink_input_set_state_within_thread(i, i->state);
1556
1557 /* The requested latency of the sink input needs to be
1558 * fixed up and then configured on the sink */
1559
1560 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1561 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1562
1563 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1564 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1565
1566 /* We don't rewind here automatically. This is left to the
1567 * sink input implementor because some sink inputs need a
1568 * slow start, i.e. need some time to buffer client
1569 * samples before beginning streaming. */
1570
1571 /* In flat volume mode we need to update the volume as
1572 * well */
1573 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1574 }
1575
1576 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1577 pa_sink_input *i = PA_SINK_INPUT(userdata);
1578
1579 /* If you change anything here, make sure to change the
1580 * sink input handling a few lines down at
1581 * PA_SINK_MESSAGE_START_MOVE, too. */
1582
1583 if (i->detach)
1584 i->detach(i);
1585
1586 pa_sink_input_set_state_within_thread(i, i->state);
1587
1588 pa_assert(i->thread_info.attached);
1589 i->thread_info.attached = FALSE;
1590
1591 /* Since the caller sleeps in pa_sink_input_unlink(),
1592 * we can safely access data outside of thread_info even
1593 * though it is mutable */
1594
1595 pa_assert(!i->sync_prev);
1596 pa_assert(!i->sync_next);
1597
1598 if (i->thread_info.sync_prev) {
1599 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1600 i->thread_info.sync_prev = NULL;
1601 }
1602
1603 if (i->thread_info.sync_next) {
1604 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1605 i->thread_info.sync_next = NULL;
1606 }
1607
1608 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1609 pa_sink_input_unref(i);
1610
1611 pa_sink_invalidate_requested_latency(s);
1612 pa_sink_request_rewind(s, (size_t) -1);
1613
1614 /* In flat volume mode we need to update the volume as
1615 * well */
1616 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1617 }
1618
1619 case PA_SINK_MESSAGE_START_MOVE: {
1620 pa_sink_input *i = PA_SINK_INPUT(userdata);
1621
1622 /* We don't support moving synchronized streams. */
1623 pa_assert(!i->sync_prev);
1624 pa_assert(!i->sync_next);
1625 pa_assert(!i->thread_info.sync_next);
1626 pa_assert(!i->thread_info.sync_prev);
1627
1628 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1629 pa_usec_t usec = 0;
1630 size_t sink_nbytes, total_nbytes;
1631
1632 /* Get the latency of the sink */
1633 if (!(s->flags & PA_SINK_LATENCY) ||
1634 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1635 usec = 0;
1636
1637 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1638 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1639
1640 if (total_nbytes > 0) {
1641 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1642 i->thread_info.rewrite_flush = TRUE;
1643 pa_sink_input_process_rewind(i, sink_nbytes);
1644 }
1645 }
1646
1647 if (i->detach)
1648 i->detach(i);
1649
1650 pa_assert(i->thread_info.attached);
1651 i->thread_info.attached = FALSE;
1652
1653 /* Let's remove the sink input ...*/
1654 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1655 pa_sink_input_unref(i);
1656
1657 pa_sink_invalidate_requested_latency(s);
1658
1659 pa_log_debug("Requesting rewind due to started move");
1660 pa_sink_request_rewind(s, (size_t) -1);
1661
1662 /* In flat volume mode we need to update the volume as
1663 * well */
1664 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1665 }
1666
1667 case PA_SINK_MESSAGE_FINISH_MOVE: {
1668 pa_sink_input *i = PA_SINK_INPUT(userdata);
1669
1670 /* We don't support moving synchronized streams. */
1671 pa_assert(!i->sync_prev);
1672 pa_assert(!i->sync_next);
1673 pa_assert(!i->thread_info.sync_next);
1674 pa_assert(!i->thread_info.sync_prev);
1675
1676 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1677
1678 pa_assert(!i->thread_info.attached);
1679 i->thread_info.attached = TRUE;
1680
1681 if (i->attach)
1682 i->attach(i);
1683
1684 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1685 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1686
1687 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1688 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1689
1690 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1691 pa_usec_t usec = 0;
1692 size_t nbytes;
1693
1694 /* Get the latency of the sink */
1695 if (!(s->flags & PA_SINK_LATENCY) ||
1696 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1697 usec = 0;
1698
1699 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1700
1701 if (nbytes > 0)
1702 pa_sink_input_drop(i, nbytes);
1703
1704 pa_log_debug("Requesting rewind due to finished move");
1705 pa_sink_request_rewind(s, nbytes);
1706 }
1707
1708 /* In flat volume mode we need to update the volume as
1709 * well */
1710 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1711 }
1712
1713 case PA_SINK_MESSAGE_SET_VOLUME:
1714
1715 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1716 s->thread_info.soft_volume = s->soft_volume;
1717 pa_sink_request_rewind(s, (size_t) -1);
1718 }
1719
1720 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1721 return 0;
1722
1723 /* Fall through ... */
1724
1725 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1726 sync_input_volumes_within_thread(s);
1727 return 0;
1728
1729 case PA_SINK_MESSAGE_GET_VOLUME:
1730 return 0;
1731
1732 case PA_SINK_MESSAGE_SET_MUTE:
1733
1734 if (s->thread_info.soft_muted != s->muted) {
1735 s->thread_info.soft_muted = s->muted;
1736 pa_sink_request_rewind(s, (size_t) -1);
1737 }
1738
1739 return 0;
1740
1741 case PA_SINK_MESSAGE_GET_MUTE:
1742 return 0;
1743
1744 case PA_SINK_MESSAGE_SET_STATE: {
1745
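/* IO-thread counterpart of the suspend/resume detection in
 * sink_set_state(): only transitions across the SUSPENDED boundary are
 * propagated to the inputs' suspend_within_thread() callbacks. */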
1746 pa_bool_t suspend_change =
1747 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1748 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1749
1750 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1751
1752 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1753 s->thread_info.rewind_nbytes = 0;
1754 s->thread_info.rewind_requested = FALSE;
1755 }
1756
1757 if (suspend_change) {
1758 pa_sink_input *i;
1759 void *state = NULL;
1760
1761 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1762 if (i->suspend_within_thread)
1763 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1764 }
1765
1766 return 0;
1767 }
1768
1769 case PA_SINK_MESSAGE_DETACH:
1770
1771 /* Detach all streams */
1772 pa_sink_detach_within_thread(s);
1773 return 0;
1774
1775 case PA_SINK_MESSAGE_ATTACH:
1776
1777 /* Reattach all streams */
1778 pa_sink_attach_within_thread(s);
1779 return 0;
1780
1781 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
1782
1783 pa_usec_t *usec = userdata;
1784 *usec = pa_sink_get_requested_latency_within_thread(s);
1785
1786 if (*usec == (pa_usec_t) -1)
1787 *usec = s->thread_info.max_latency;
1788
1789 return 0;
1790 }
1791
1792 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
1793 pa_usec_t *r = userdata;
1794
1795 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
1796
1797 return 0;
1798 }
1799
1800 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
1801 pa_usec_t *r = userdata;
1802
1803 r[0] = s->thread_info.min_latency;
1804 r[1] = s->thread_info.max_latency;
1805
1806 return 0;
1807 }
1808
1809 case PA_SINK_MESSAGE_GET_MAX_REWIND:
1810
1811 *((size_t*) userdata) = s->thread_info.max_rewind;
1812 return 0;
1813
1814 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
1815
1816 *((size_t*) userdata) = s->thread_info.max_request;
1817 return 0;
1818
1819 case PA_SINK_MESSAGE_SET_MAX_REWIND:
1820
1821 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
1822 return 0;
1823
1824 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
1825
1826 pa_sink_set_max_request_within_thread(s, (size_t) offset);
1827 return 0;
1828
1829 case PA_SINK_MESSAGE_GET_LATENCY:
1830 case PA_SINK_MESSAGE_MAX:
1831 ;
1832 }
1833
1834 return -1;
1835 }
1836
1837 /* Called from main thread */
1838 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
1839 pa_sink *sink;
1840 uint32_t idx;
1841 int ret = 0;
1842
1843 pa_core_assert_ref(c);
1844 pa_assert(cause != 0);
1845
1846 for (sink = PA_SINK(pa_idxset_first(c->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(c->sinks, &idx))) {
1847 int r;
1848
1849 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
1850 ret = r;
1851 }
1852
1853 return ret;
1854 }
1855
1856 /* Called from main thread */
1857 void pa_sink_detach(pa_sink *s) {
1858 pa_sink_assert_ref(s);
1859 pa_assert(PA_SINK_IS_LINKED(s->state));
1860
1861 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
1862 }
1863
1864 /* Called from main thread */
1865 void pa_sink_attach(pa_sink *s) {
1866 pa_sink_assert_ref(s);
1867 pa_assert(PA_SINK_IS_LINKED(s->state));
1868
1869 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
1870 }
1871
1872 /* Called from IO thread */
1873 void pa_sink_detach_within_thread(pa_sink *s) {
1874 pa_sink_input *i;
1875 void *state = NULL;
1876
1877 pa_sink_assert_ref(s);
1878 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1879
1880 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1881 if (i->detach)
1882 i->detach(i);
1883
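     /* The monitor source lives in the same IO thread, so detach it as well */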
1884 if (s->monitor_source)
1885 pa_source_detach_within_thread(s->monitor_source);
1886 }
1887
1888 /* Called from IO thread */
1889 void pa_sink_attach_within_thread(pa_sink *s) {
1890 pa_sink_input *i;
1891 void *state = NULL;
1892
1893 pa_sink_assert_ref(s);
1894 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1895
1896 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1897 if (i->attach)
1898 i->attach(i);
1899
1900 if (s->monitor_source)
1901 pa_source_attach_within_thread(s->monitor_source);
1902 }
1903
1904 /* Called from IO thread */
1905 void pa_sink_request_rewind(pa_sink *s, size_t nbytes) {
1906 pa_sink_assert_ref(s);
1907 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1908
1909 if (s->thread_info.state == PA_SINK_SUSPENDED)
1910 return;
1911
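     /* (size_t) -1 requests the largest possible rewind, and in no case may we rewind further than max_rewind */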
1912 if (nbytes == (size_t) -1)
1913 nbytes = s->thread_info.max_rewind;
1914
1915 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
1916
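     /* An equal or larger rewind is already scheduled, nothing to do */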
1917 if (s->thread_info.rewind_requested &&
1918 nbytes <= s->thread_info.rewind_nbytes)
1919 return;
1920
1921 s->thread_info.rewind_nbytes = nbytes;
1922 s->thread_info.rewind_requested = TRUE;
1923
1924 if (s->request_rewind)
1925 s->request_rewind(s);
1926 }
1927
1928 /* Called from IO thread */
1929 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
1930 pa_usec_t result = (pa_usec_t) -1;
1931 pa_sink_input *i;
1932 void *state = NULL;
1933 pa_usec_t monitor_latency;
1934
1935 pa_sink_assert_ref(s);
1936
1937 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
1938 return PA_CLAMP(s->fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
1939
1940 if (s->thread_info.requested_latency_valid)
1941 return s->thread_info.requested_latency;
1942
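     /* The effective requested latency is the smallest latency any connected stream asked for */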
1943 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1944
1945 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
1946 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
1947 result = i->thread_info.requested_sink_latency;
1948
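     /* The monitor source might ask for an even lower latency */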
1949 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
1950
1951 if (monitor_latency != (pa_usec_t) -1 &&
1952 (result == (pa_usec_t) -1 || result > monitor_latency))
1953 result = monitor_latency;
1954
1955 if (result != (pa_usec_t) -1)
1956 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
1957
1958 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1959 /* Only cache if properly initialized */
1960 s->thread_info.requested_latency = result;
1961 s->thread_info.requested_latency_valid = TRUE;
1962 }
1963
1964 return result;
1965 }
1966
1967 /* Called from main thread */
1968 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
1969 pa_usec_t usec = 0;
1970
1971 pa_sink_assert_ref(s);
1972 pa_assert(PA_SINK_IS_LINKED(s->state));
1973
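     /* A suspended sink renders nothing, so report a requested latency of 0 */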
1974 if (s->state == PA_SINK_SUSPENDED)
1975 return 0;
1976
1977 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
1978 return usec;
1979 }
1980
1981 /* Called from IO as well as the main thread -- the latter only before the IO thread has started up */
1982 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
1983 pa_sink_input *i;
1984 void *state = NULL;
1985
1986 pa_sink_assert_ref(s);
1987
1988 if (max_rewind == s->thread_info.max_rewind)
1989 return;
1990
1991 s->thread_info.max_rewind = max_rewind;
1992
1993 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
1994 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1995 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1996 }
1997
1998 if (s->monitor_source)
1999 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2000 }
2001
2002 /* Called from main thread */
2003 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2004 pa_sink_assert_ref(s);
2005
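     /* Once the sink is linked its thread data may only be changed from the IO thread, hence go via a message; before that we may set it directly */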
2006 if (PA_SINK_IS_LINKED(s->state))
2007 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2008 else
2009 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2010 }
2011
2012 /* Called from IO as well as the main thread -- the latter only before the IO thread has started up */
2013 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2014 void *state = NULL;
2015
2016 pa_sink_assert_ref(s);
2017
2018 if (max_request == s->thread_info.max_request)
2019 return;
2020
2021 s->thread_info.max_request = max_request;
2022
2023 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2024 pa_sink_input *i;
2025
2026 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2027 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2028 }
2029 }
2030
2031 /* Called from main thread */
2032 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2033 pa_sink_assert_ref(s);
2034
2035 if (PA_SINK_IS_LINKED(s->state))
2036 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2037 else
2038 pa_sink_set_max_request_within_thread(s, max_request);
2039 }
2040
2041 /* Called from IO thread */
2042 void pa_sink_invalidate_requested_latency(pa_sink *s) {
2043 pa_sink_input *i;
2044 void *state = NULL;
2045
2046 pa_sink_assert_ref(s);
2047
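     /* With a fixed latency there is nothing to recalculate */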
2048 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2049 return;
2050
2051 s->thread_info.requested_latency_valid = FALSE;
2052
2053 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2054
2055 if (s->update_requested_latency)
2056 s->update_requested_latency(s);
2057
2058 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2059 if (i->update_sink_requested_latency)
2060 i->update_sink_requested_latency(i);
2061 }
2062 }
2063
2064 /* Called from main thread */
2065 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2066 pa_sink_assert_ref(s);
2067
2068 /* A min_latency of 0 means: no lower limit at all;
2069 * any other value is taken as the explicit lower limit.
2070 *
2071 * The same applies to max_latency. */
2072
2073 if (min_latency < ABSOLUTE_MIN_LATENCY)
2074 min_latency = ABSOLUTE_MIN_LATENCY;
2075
2076 if (max_latency <= 0 ||
2077 max_latency > ABSOLUTE_MAX_LATENCY)
2078 max_latency = ABSOLUTE_MAX_LATENCY;
2079
2080 pa_assert(min_latency <= max_latency);
2081
2082 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2083 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2084 max_latency == ABSOLUTE_MAX_LATENCY) ||
2085 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2086
2087 if (PA_SINK_IS_LINKED(s->state)) {
2088 pa_usec_t r[2];
2089
2090 r[0] = min_latency;
2091 r[1] = max_latency;
2092
2093 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2094 } else
2095 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2096 }
2097
2098 /* Called from main thread */
2099 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2100 pa_sink_assert_ref(s);
2101 pa_assert(min_latency);
2102 pa_assert(max_latency);
2103
2104 if (PA_SINK_IS_LINKED(s->state)) {
2105 pa_usec_t r[2] = { 0, 0 };
2106
2107 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2108
2109 *min_latency = r[0];
2110 *max_latency = r[1];
2111 } else {
2112 *min_latency = s->thread_info.min_latency;
2113 *max_latency = s->thread_info.max_latency;
2114 }
2115 }
2116
2117 /* Called from IO thread */
2118 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2119 void *state = NULL;
2120
2121 pa_sink_assert_ref(s);
2122
2123 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2124 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2125 pa_assert(min_latency <= max_latency);
2126
2127 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2128 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2129 max_latency == ABSOLUTE_MAX_LATENCY) ||
2130 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2131
2132 s->thread_info.min_latency = min_latency;
2133 s->thread_info.max_latency = max_latency;
2134
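     /* Tell the connected streams that the allowed latency range changed */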
2135 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2136 pa_sink_input *i;
2137
2138 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2139 if (i->update_sink_latency_range)
2140 i->update_sink_latency_range(i);
2141 }
2142
2143 pa_sink_invalidate_requested_latency(s);
2144
2145 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2146 }
2147
2148 /* Called from main thread, before the sink is put (i.e. before pa_sink_put() has been called) */
2149 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2150 pa_sink_assert_ref(s);
2151
2152 pa_assert(pa_sink_get_state(s) == PA_SINK_INIT);
2153
2154 if (latency < ABSOLUTE_MIN_LATENCY)
2155 latency = ABSOLUTE_MIN_LATENCY;
2156
2157 if (latency > ABSOLUTE_MAX_LATENCY)
2158 latency = ABSOLUTE_MAX_LATENCY;
2159
2160 s->fixed_latency = latency;
2161 pa_source_set_fixed_latency(s->monitor_source, latency);
2162 }
2163
2164 /* Called from main context */
2165 size_t pa_sink_get_max_rewind(pa_sink *s) {
2166 size_t r;
2167 pa_sink_assert_ref(s);
2168
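     /* Before the sink is linked the IO thread is not running yet, so the thread data may be read directly */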
2169 if (!PA_SINK_IS_LINKED(s->state))
2170 return s->thread_info.max_rewind;
2171
2172 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2173
2174 return r;
2175 }
2176
2177 /* Called from main context */
2178 size_t pa_sink_get_max_request(pa_sink *s) {
2179 size_t r;
2180 pa_sink_assert_ref(s);
2181
2182 if (!PA_SINK_IS_LINKED(s->state))
2183 return s->thread_info.max_request;
2184
2185 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2186
2187 return r;
2188 }
2189
2190 /* Called from main context */
2191 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2192 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2193
2194 pa_assert(p);
2195
2196 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2197 return TRUE;
2198
2199 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2200
2201 if (pa_streq(ff, "microphone"))
2202 t = "audio-input-microphone";
2203 else if (pa_streq(ff, "webcam"))
2204 t = "camera-web";
2205 else if (pa_streq(ff, "computer"))
2206 t = "computer";
2207 else if (pa_streq(ff, "handset"))
2208 t = "phone";
2209 else if (pa_streq(ff, "portable"))
2210 t = "multimedia-player";
2211 else if (pa_streq(ff, "tv"))
2212 t = "video-display";
2213
2214 /*
2215 * The following icons are not (yet) part of the icon naming
2216 * spec; see the discussion at
2217 *
2218 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2219 */
2220 else if (pa_streq(ff, "headset"))
2221 t = "audio-headset";
2222 else if (pa_streq(ff, "headphone"))
2223 t = "audio-headphones";
2224 else if (pa_streq(ff, "speaker"))
2225 t = "audio-speakers";
2226 else if (pa_streq(ff, "hands-free"))
2227 t = "audio-handsfree";
2228 }
2229
2230 if (!t)
2231 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2232 if (pa_streq(c, "modem"))
2233 t = "modem";
2234
2235 if (!t) {
2236 if (is_sink)
2237 t = "audio-card";
2238 else
2239 t = "audio-input-microphone";
2240 }
2241
2242 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2243 if (strstr(profile, "analog"))
2244 s = "-analog";
2245 else if (strstr(profile, "iec958"))
2246 s = "-iec958";
2247 else if (strstr(profile, "hdmi"))
2248 s = "-hdmi";
2249 }
2250
2251 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2252
2253 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2254
2255 return TRUE;
2256 }
2257
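     /* Called from main thread */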
2258 pa_bool_t pa_device_init_description(pa_proplist *p) {
2259 const char *s;
2260 pa_assert(p);
2261
2262 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2263 return TRUE;
2264
2265 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2266 if (pa_streq(s, "internal")) {
2267 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Internal Audio"));
2268 return TRUE;
2269 }
2270
2271 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2272 if (pa_streq(s, "modem")) {
2273 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Modem"));
2274 return TRUE;
2275 }
2276
2277 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME))) {
2278 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, s);
2279 return TRUE;
2280 }
2281
2282 return FALSE;
2283 }