1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
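/* Note: all latency values in this file are in microseconds (pa_usec_t), so
 * ABSOLUTE_MIN_LATENCY is 0.5 ms, ABSOLUTE_MAX_LATENCY is 10 s and
 * DEFAULT_FIXED_LATENCY is 250 ms. MAX_MIX_CHANNELS bounds how many sink
 * inputs are mixed in a single render pass, and MIX_BUFFER_LENGTH (one page)
 * is the fallback render size when no explicit length is requested. */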
54
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 memset(data, 0, sizeof(*data));
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
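/* The port structure is over-allocated: 'extra' trailing bytes follow the
 * (aligned) pa_device_port so that the owning module can keep its own
 * per-port data within the same allocation. */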
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
149
150 /* Called from main context */
151 static void reset_callbacks(pa_sink *s) {
152 pa_assert(s);
153
154 s->set_state = NULL;
155 s->get_volume = NULL;
156 s->set_volume = NULL;
157 s->get_mute = NULL;
158 s->set_mute = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
161 s->set_port = NULL;
162 }
163
164 /* Called from main context */
165 pa_sink* pa_sink_new(
166 pa_core *core,
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
169
170 pa_sink *s;
171 const char *name;
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
174 const char *dn;
175 char *pt;
176
177 pa_assert(core);
178 pa_assert(data);
179 pa_assert(data->name);
180
181 s = pa_msgobject_new(pa_sink);
182
183 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
184 pa_xfree(s);
185 return NULL;
186 }
187
188 pa_sink_new_data_set_name(data, name);
189
190 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
191 pa_xfree(s);
192 pa_namereg_unregister(core, name);
193 return NULL;
194 }
195
196 /* FIXME, need to free s here on failure */
197
198 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
199 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
200
201 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
202
203 if (!data->channel_map_is_set)
204 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
205
206 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
207 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
208
209 if (!data->volume_is_set)
210 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
211
212 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
213 pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);
214
215 if (!data->muted_is_set)
216 data->muted = FALSE;
217
218 if (data->card)
219 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
220
221 pa_device_init_description(data->proplist);
222 pa_device_init_icon(data->proplist, TRUE);
223 pa_device_init_intended_roles(data->proplist);
224
225 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
226 pa_xfree(s);
227 pa_namereg_unregister(core, name);
228 return NULL;
229 }
230
231 s->parent.parent.free = sink_free;
232 s->parent.process_msg = pa_sink_process_msg;
233
234 s->core = core;
235 s->state = PA_SINK_INIT;
236 s->flags = flags;
237 s->suspend_cause = 0;
238 s->name = pa_xstrdup(name);
239 s->proplist = pa_proplist_copy(data->proplist);
240 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
241 s->module = data->module;
242 s->card = data->card;
243
244 s->sample_spec = data->sample_spec;
245 s->channel_map = data->channel_map;
246
247 s->inputs = pa_idxset_new(NULL, NULL);
248 s->n_corked = 0;
249
250 s->reference_volume = s->virtual_volume = data->volume;
251 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
252 s->base_volume = PA_VOLUME_NORM;
253 s->n_volume_steps = PA_VOLUME_NORM+1;
254 s->muted = data->muted;
255 s->refresh_volume = s->refresh_muted = FALSE;
256
257 s->fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
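/* Sinks that cannot adjust their latency dynamically start out with a fixed
 * default of DEFAULT_FIXED_LATENCY (250 ms); the sink implementation may
 * overwrite s->fixed_latency with the real value before pa_sink_put() if it
 * knows better. */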
258
259 reset_callbacks(s);
260 s->userdata = NULL;
261
262 s->asyncmsgq = NULL;
263 s->rtpoll = NULL;
264
265 /* As a minor optimization we just steal the list instead of
266 * copying it here */
267 s->ports = data->ports;
268 data->ports = NULL;
269
270 s->active_port = NULL;
271 s->save_port = FALSE;
272
273 if (data->active_port && s->ports)
274 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
275 s->save_port = data->save_port;
276
277 if (!s->active_port && s->ports) {
278 void *state;
279 pa_device_port *p;
280
281 PA_HASHMAP_FOREACH(p, s->ports, state)
282 if (!s->active_port || p->priority > s->active_port->priority)
283 s->active_port = p;
284 }
285
286 s->save_volume = data->save_volume;
287 s->save_muted = data->save_muted;
288
289 pa_silence_memchunk_get(
290 &core->silence_cache,
291 core->mempool,
292 &s->silence,
293 &s->sample_spec,
294 0);
295
296 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
297 s->thread_info.soft_volume = s->soft_volume;
298 s->thread_info.soft_muted = s->muted;
299 s->thread_info.state = s->state;
300 s->thread_info.rewind_nbytes = 0;
301 s->thread_info.rewind_requested = FALSE;
302 s->thread_info.max_rewind = 0;
303 s->thread_info.max_request = 0;
304 s->thread_info.requested_latency_valid = FALSE;
305 s->thread_info.requested_latency = 0;
306 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
307 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
308
309 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
310
311 if (s->card)
312 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
313
314 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
315 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
316 s->index,
317 s->name,
318 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
319 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
320 pt);
321 pa_xfree(pt);
322
323 pa_source_new_data_init(&source_data);
324 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
325 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
326 source_data.name = pa_sprintf_malloc("%s.monitor", name);
327 source_data.driver = data->driver;
328 source_data.module = data->module;
329 source_data.card = data->card;
330
331 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
332 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
333 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
334
335 s->monitor_source = pa_source_new(core, &source_data,
336 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
337 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
338
339 pa_source_new_data_done(&source_data);
340
341 if (!s->monitor_source) {
342 pa_sink_unlink(s);
343 pa_sink_unref(s);
344 return NULL;
345 }
346
347 s->monitor_source->monitor_of = s;
348
349 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
350 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
351
352 return s;
353 }
354
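/* A minimal sketch of how a sink implementation typically drives this API
 * (names such as "my_sink", ss, map, thread_mq and rtpoll are placeholders
 * supplied by the calling module, not definitions in this file):
 *
 *   pa_sink_new_data data;
 *   pa_sink *sink;
 *
 *   pa_sink_new_data_init(&data);
 *   pa_sink_new_data_set_name(&data, "my_sink");
 *   pa_sink_new_data_set_sample_spec(&data, &ss);
 *   pa_sink_new_data_set_channel_map(&data, &map);
 *   sink = pa_sink_new(core, &data, PA_SINK_LATENCY);
 *   pa_sink_new_data_done(&data);
 *
 *   if (sink) {
 *       pa_sink_set_asyncmsgq(sink, thread_mq.inq);
 *       pa_sink_set_rtpoll(sink, rtpoll);
 *       ... set callbacks, fixed_latency, etc. ...
 *       pa_sink_put(sink);
 *   }
 */
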
355 /* Called from main context */
356 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
357 int ret;
358 pa_bool_t suspend_change;
359 pa_sink_state_t original_state;
360
361 pa_assert(s);
362
363 if (s->state == state)
364 return 0;
365
366 original_state = s->state;
367
368 suspend_change =
369 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
370 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
371
372 if (s->set_state)
373 if ((ret = s->set_state(s, state)) < 0)
374 return ret;
375
376 if (s->asyncmsgq)
377 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
378
379 if (s->set_state)
380 s->set_state(s, original_state);
381
382 return ret;
383 }
384
385 s->state = state;
386
387 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
388 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
389 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
390 }
391
392 if (suspend_change) {
393 pa_sink_input *i;
394 uint32_t idx;
395
396 /* We're suspending or resuming, tell everyone about it */
397
398 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx)))
399 if (s->state == PA_SINK_SUSPENDED &&
400 (i->flags & PA_SINK_INPUT_FAIL_ON_SUSPEND))
401 pa_sink_input_kill(i);
402 else if (i->suspend)
403 i->suspend(i, state == PA_SINK_SUSPENDED);
404
405 if (s->monitor_source)
406 pa_source_sync_suspend(s->monitor_source);
407 }
408
409 return 0;
410 }
411
412 /* Called from main context */
413 void pa_sink_put(pa_sink* s) {
414 pa_sink_assert_ref(s);
415
416 pa_assert(s->state == PA_SINK_INIT);
417
418 /* The following fields must be initialized properly when calling _put() */
419 pa_assert(s->asyncmsgq);
420 pa_assert(s->rtpoll);
421 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
422
423 /* Generally, flags should be initialized via pa_sink_new(). As a
424 * special exception we allow volume related flags to be set
425 * between _new() and _put(). */
426
427 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
428 s->flags |= PA_SINK_DECIBEL_VOLUME;
429
430 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
431 s->flags |= PA_SINK_FLAT_VOLUME;
432
433 s->thread_info.soft_volume = s->soft_volume;
434 s->thread_info.soft_muted = s->muted;
435
436 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
437 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
438 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->fixed_latency != 0));
439 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
440 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
441
442 pa_assert(s->monitor_source->fixed_latency == s->fixed_latency);
443 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
444 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
445
446 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
447
448 pa_source_put(s->monitor_source);
449
450 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
451 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
452 }
453
454 /* Called from main context */
455 void pa_sink_unlink(pa_sink* s) {
456 pa_bool_t linked;
457 pa_sink_input *i, *j = NULL;
458
459 pa_assert(s);
460
461 /* Please note that pa_sink_unlink() does more than simply
462 * reversing pa_sink_put(). It also undoes the registrations
463 * already done in pa_sink_new()! */
464
465 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
466 * may be called multiple times on the same sink without bad
467 * effects. */
468
469 linked = PA_SINK_IS_LINKED(s->state);
470
471 if (linked)
472 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
473
474 if (s->state != PA_SINK_UNLINKED)
475 pa_namereg_unregister(s->core, s->name);
476 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
477
478 if (s->card)
479 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
480
481 while ((i = pa_idxset_first(s->inputs, NULL))) {
482 pa_assert(i != j);
483 pa_sink_input_kill(i);
484 j = i;
485 }
486
487 if (linked)
488 sink_set_state(s, PA_SINK_UNLINKED);
489 else
490 s->state = PA_SINK_UNLINKED;
491
492 reset_callbacks(s);
493
494 if (s->monitor_source)
495 pa_source_unlink(s->monitor_source);
496
497 if (linked) {
498 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
499 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
500 }
501 }
502
503 /* Called from main context */
504 static void sink_free(pa_object *o) {
505 pa_sink *s = PA_SINK(o);
506 pa_sink_input *i;
507
508 pa_assert(s);
509 pa_assert(pa_sink_refcnt(s) == 0);
510
511 if (PA_SINK_IS_LINKED(s->state))
512 pa_sink_unlink(s);
513
514 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
515
516 if (s->monitor_source) {
517 pa_source_unref(s->monitor_source);
518 s->monitor_source = NULL;
519 }
520
521 pa_idxset_free(s->inputs, NULL, NULL);
522
523 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
524 pa_sink_input_unref(i);
525
526 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
527
528 if (s->silence.memblock)
529 pa_memblock_unref(s->silence.memblock);
530
531 pa_xfree(s->name);
532 pa_xfree(s->driver);
533
534 if (s->proplist)
535 pa_proplist_free(s->proplist);
536
537 if (s->ports) {
538 pa_device_port *p;
539
540 while ((p = pa_hashmap_steal_first(s->ports)))
541 pa_device_port_free(p);
542
543 pa_hashmap_free(s->ports, NULL, NULL);
544 }
545
546 pa_xfree(s);
547 }
548
549 /* Called from main context */
550 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
551 pa_sink_assert_ref(s);
552
553 s->asyncmsgq = q;
554
555 if (s->monitor_source)
556 pa_source_set_asyncmsgq(s->monitor_source, q);
557 }
558
559 /* Called from main context */
560 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
561 pa_sink_assert_ref(s);
562
563 s->rtpoll = p;
564
565 if (s->monitor_source)
566 pa_source_set_rtpoll(s->monitor_source, p);
567 }
568
569 /* Called from main context */
570 int pa_sink_update_status(pa_sink*s) {
571 pa_sink_assert_ref(s);
572 pa_assert(PA_SINK_IS_LINKED(s->state));
573
574 if (s->state == PA_SINK_SUSPENDED)
575 return 0;
576
577 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
578 }
579
580 /* Called from main context */
581 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
582 pa_sink_assert_ref(s);
583 pa_assert(PA_SINK_IS_LINKED(s->state));
584 pa_assert(cause != 0);
585
586 if (suspend) {
587 s->suspend_cause |= cause;
588 s->monitor_source->suspend_cause |= cause;
589 } else {
590 s->suspend_cause &= ~cause;
591 s->monitor_source->suspend_cause &= ~cause;
592 }
593
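/* Nothing to do if we are already in the matching state, i.e. suspended
 * with a non-empty cause set, or not suspended with an empty one. */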
594 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
595 return 0;
596
597 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
598
599 if (s->suspend_cause)
600 return sink_set_state(s, PA_SINK_SUSPENDED);
601 else
602 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
603 }
604
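/* The following three functions implement "move every stream away and back":
 * pa_sink_move_all_start() detaches all inputs into a queue,
 * pa_sink_move_all_finish() reattaches the queued inputs to a (possibly
 * different) sink, and pa_sink_move_all_fail() kills those that could not be
 * moved. This is typically used while a sink is being removed or its card
 * profile is switched. */
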
605 /* Called from main context */
606 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
607 pa_sink_input *i, *n;
608 uint32_t idx;
609
610 pa_sink_assert_ref(s);
611 pa_assert(PA_SINK_IS_LINKED(s->state));
612
613 if (!q)
614 q = pa_queue_new();
615
616 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
617 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
618
619 pa_sink_input_ref(i);
620
621 if (pa_sink_input_start_move(i) >= 0)
622 pa_queue_push(q, i);
623 else
624 pa_sink_input_unref(i);
625 }
626
627 return q;
628 }
629
630 /* Called from main context */
631 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
632 pa_sink_input *i;
633
634 pa_sink_assert_ref(s);
635 pa_assert(PA_SINK_IS_LINKED(s->state));
636 pa_assert(q);
637
638 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
639 if (pa_sink_input_finish_move(i, s, save) < 0)
640 pa_sink_input_kill(i);
641
642 pa_sink_input_unref(i);
643 }
644
645 pa_queue_free(q, NULL, NULL);
646 }
647
648 /* Called from main context */
649 void pa_sink_move_all_fail(pa_queue *q) {
650 pa_sink_input *i;
651 pa_assert(q);
652
653 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
654 if (pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_MOVE_FAIL], i) == PA_HOOK_OK) {
655 pa_sink_input_kill(i);
656 pa_sink_input_unref(i);
657 }
658 }
659
660 pa_queue_free(q, NULL, NULL);
661 }
662
663 /* Called from IO thread context */
664 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
665 pa_sink_input *i;
666 void *state = NULL;
667 pa_sink_assert_ref(s);
668 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
669
670 /* If nobody requested this and this is not actually a real rewind
671 * then we can short-cut it */
672 if (!s->thread_info.rewind_requested && nbytes <= 0)
673 return;
674
675 s->thread_info.rewind_nbytes = 0;
676 s->thread_info.rewind_requested = FALSE;
677
678 if (s->thread_info.state == PA_SINK_SUSPENDED)
679 return;
680
681 if (nbytes > 0)
682 pa_log_debug("Processing rewind...");
683
684 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
685 pa_sink_input_assert_ref(i);
686 pa_sink_input_process_rewind(i, nbytes);
687 }
688
689 if (nbytes > 0)
690 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
691 pa_source_process_rewind(s->monitor_source, nbytes);
692 }
693
694 /* Called from IO thread context */
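/* Peek at most 'maxinfo' sink inputs, storing one pa_mix_info entry (and a
 * reference on the input) per non-silent chunk. *length is reduced to the
 * shortest chunk that was peeked so that all entries cover the same time
 * span; the number of filled entries is returned. */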
695 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
696 pa_sink_input *i;
697 unsigned n = 0;
698 void *state = NULL;
699 size_t mixlength = *length;
700
701 pa_sink_assert_ref(s);
702 pa_assert(info);
703
704 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
705 pa_sink_input_assert_ref(i);
706
707 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
708
709 if (mixlength == 0 || info->chunk.length < mixlength)
710 mixlength = info->chunk.length;
711
712 if (pa_memblock_is_silence(info->chunk.memblock)) {
713 pa_memblock_unref(info->chunk.memblock);
714 continue;
715 }
716
717 info->userdata = pa_sink_input_ref(i);
718
719 pa_assert(info->chunk.memblock);
720 pa_assert(info->chunk.length > 0);
721
722 info++;
723 n++;
724 maxinfo--;
725 }
726
727 if (mixlength > 0)
728 *length = mixlength;
729
730 return n;
731 }
732
733 /* Called from IO thread context */
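/* Consume 'result->length' bytes from every input: drop the data that was
 * just mixed, forward each input's contribution (or silence) to any direct
 * outputs on the monitor source, release the references collected by
 * fill_mix_info(), and finally post the mixed result to the monitor source. */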
734 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
735 pa_sink_input *i;
736 void *state = NULL;
737 unsigned p = 0;
738 unsigned n_unreffed = 0;
739
740 pa_sink_assert_ref(s);
741 pa_assert(result);
742 pa_assert(result->memblock);
743 pa_assert(result->length > 0);
744
745 /* We optimize for the case where the order of the inputs has not changed */
746
747 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
748 unsigned j;
749 pa_mix_info* m = NULL;
750
751 pa_sink_input_assert_ref(i);
752
753 /* Let's try to find the matching entry in the pa_mix_info array */
754 for (j = 0; j < n; j ++) {
755
756 if (info[p].userdata == i) {
757 m = info + p;
758 break;
759 }
760
761 p++;
762 if (p >= n)
763 p = 0;
764 }
765
766 /* Drop read data */
767 pa_sink_input_drop(i, result->length);
768
769 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
770
771 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
772 void *ostate = NULL;
773 pa_source_output *o;
774 pa_memchunk c;
775
776 if (m && m->chunk.memblock) {
777 c = m->chunk;
778 pa_memblock_ref(c.memblock);
779 pa_assert(result->length <= c.length);
780 c.length = result->length;
781
782 pa_memchunk_make_writable(&c, 0);
783 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
784 } else {
785 c = s->silence;
786 pa_memblock_ref(c.memblock);
787 pa_assert(result->length <= c.length);
788 c.length = result->length;
789 }
790
791 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
792 pa_source_output_assert_ref(o);
793 pa_assert(o->direct_on_input == i);
794 pa_source_post_direct(s->monitor_source, o, &c);
795 }
796
797 pa_memblock_unref(c.memblock);
798 }
799 }
800
801 if (m) {
802 if (m->chunk.memblock)
803 pa_memblock_unref(m->chunk.memblock);
804 pa_memchunk_reset(&m->chunk);
805
806 pa_sink_input_unref(m->userdata);
807 m->userdata = NULL;
808
809 n_unreffed += 1;
810 }
811 }
812
813 /* Now drop references to entries that are included in the
814 * pa_mix_info array but don't exist anymore */
815
816 if (n_unreffed < n) {
817 for (; n > 0; info++, n--) {
818 if (info->userdata)
819 pa_sink_input_unref(info->userdata);
820 if (info->chunk.memblock)
821 pa_memblock_unref(info->chunk.memblock);
822 }
823 }
824
825 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
826 pa_source_post(s->monitor_source, result);
827 }
828
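/* The four render entry points below differ in how much data they guarantee:
 * pa_sink_render() returns a freshly mixed chunk of up to 'length' bytes,
 * pa_sink_render_into() mixes into a caller-supplied chunk and may shorten
 * it, pa_sink_render_into_full() loops until the supplied chunk is completely
 * filled, and pa_sink_render_full() produces exactly 'length' bytes, padding
 * with additional render passes if the first one came up short. */
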
829 /* Called from IO thread context */
830 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
831 pa_mix_info info[MAX_MIX_CHANNELS];
832 unsigned n;
833 size_t block_size_max;
834
835 pa_sink_assert_ref(s);
836 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
837 pa_assert(pa_frame_aligned(length, &s->sample_spec));
838 pa_assert(result);
839
840 pa_sink_ref(s);
841
842 pa_assert(!s->thread_info.rewind_requested);
843 pa_assert(s->thread_info.rewind_nbytes == 0);
844
845 if (s->thread_info.state == PA_SINK_SUSPENDED) {
846 result->memblock = pa_memblock_ref(s->silence.memblock);
847 result->index = s->silence.index;
848 result->length = PA_MIN(s->silence.length, length);
pa_sink_unref(s); /* drop the reference taken above before the early return */
849 return;
850 }
851
852 if (length <= 0)
853 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
854
855 block_size_max = pa_mempool_block_size_max(s->core->mempool);
856 if (length > block_size_max)
857 length = pa_frame_align(block_size_max, &s->sample_spec);
858
859 pa_assert(length > 0);
860
861 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
862
863 if (n == 0) {
864
865 *result = s->silence;
866 pa_memblock_ref(result->memblock);
867
868 if (result->length > length)
869 result->length = length;
870
871 } else if (n == 1) {
872 pa_cvolume volume;
873
874 *result = info[0].chunk;
875 pa_memblock_ref(result->memblock);
876
877 if (result->length > length)
878 result->length = length;
879
880 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
881
882 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
883 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
884 pa_memblock_unref(result->memblock);
885 pa_silence_memchunk_get(&s->core->silence_cache,
886 s->core->mempool,
887 result,
888 &s->sample_spec,
889 result->length);
890 } else {
891 pa_memchunk_make_writable(result, 0);
892 pa_volume_memchunk(result, &s->sample_spec, &volume);
893 }
894 }
895 } else {
896 void *ptr;
897 result->memblock = pa_memblock_new(s->core->mempool, length);
898
899 ptr = pa_memblock_acquire(result->memblock);
900 result->length = pa_mix(info, n,
901 ptr, length,
902 &s->sample_spec,
903 &s->thread_info.soft_volume,
904 s->thread_info.soft_muted);
905 pa_memblock_release(result->memblock);
906
907 result->index = 0;
908 }
909
910 inputs_drop(s, info, n, result);
911
912 pa_sink_unref(s);
913 }
914
915 /* Called from IO thread context */
916 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
917 pa_mix_info info[MAX_MIX_CHANNELS];
918 unsigned n;
919 size_t length, block_size_max;
920
921 pa_sink_assert_ref(s);
922 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
923 pa_assert(target);
924 pa_assert(target->memblock);
925 pa_assert(target->length > 0);
926 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
927
928 pa_sink_ref(s);
929
930 pa_assert(!s->thread_info.rewind_requested);
931 pa_assert(s->thread_info.rewind_nbytes == 0);
932
933 if (s->thread_info.state == PA_SINK_SUSPENDED) {
934 pa_silence_memchunk(target, &s->sample_spec);
pa_sink_unref(s); /* drop the reference taken above before the early return */
935 return;
936 }
937
938 length = target->length;
939 block_size_max = pa_mempool_block_size_max(s->core->mempool);
940 if (length > block_size_max)
941 length = pa_frame_align(block_size_max, &s->sample_spec);
942
943 pa_assert(length > 0);
944
945 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
946
947 if (n == 0) {
948 if (target->length > length)
949 target->length = length;
950
951 pa_silence_memchunk(target, &s->sample_spec);
952 } else if (n == 1) {
953 pa_cvolume volume;
954
955 if (target->length > length)
956 target->length = length;
957
958 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
959
960 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
961 pa_silence_memchunk(target, &s->sample_spec);
962 else {
963 pa_memchunk vchunk;
964
965 vchunk = info[0].chunk;
966 pa_memblock_ref(vchunk.memblock);
967
968 if (vchunk.length > length)
969 vchunk.length = length;
970
971 if (!pa_cvolume_is_norm(&volume)) {
972 pa_memchunk_make_writable(&vchunk, 0);
973 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
974 }
975
976 pa_memchunk_memcpy(target, &vchunk);
977 pa_memblock_unref(vchunk.memblock);
978 }
979
980 } else {
981 void *ptr;
982
983 ptr = pa_memblock_acquire(target->memblock);
984
985 target->length = pa_mix(info, n,
986 (uint8_t*) ptr + target->index, length,
987 &s->sample_spec,
988 &s->thread_info.soft_volume,
989 s->thread_info.soft_muted);
990
991 pa_memblock_release(target->memblock);
992 }
993
994 inputs_drop(s, info, n, target);
995
996 pa_sink_unref(s);
997 }
998
999 /* Called from IO thread context */
1000 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1001 pa_memchunk chunk;
1002 size_t l, d;
1003
1004 pa_sink_assert_ref(s);
1005 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1006 pa_assert(target);
1007 pa_assert(target->memblock);
1008 pa_assert(target->length > 0);
1009 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1010
1011 pa_sink_ref(s);
1012
1013 pa_assert(!s->thread_info.rewind_requested);
1014 pa_assert(s->thread_info.rewind_nbytes == 0);
1015
1016 l = target->length;
1017 d = 0;
1018 while (l > 0) {
1019 chunk = *target;
1020 chunk.index += d;
1021 chunk.length -= d;
1022
1023 pa_sink_render_into(s, &chunk);
1024
1025 d += chunk.length;
1026 l -= chunk.length;
1027 }
1028
1029 pa_sink_unref(s);
1030 }
1031
1032 /* Called from IO thread context */
1033 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1034 pa_mix_info info[MAX_MIX_CHANNELS];
1035 size_t length1st = length;
1036 unsigned n;
1037
1038 pa_sink_assert_ref(s);
1039 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1040 pa_assert(length > 0);
1041 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1042 pa_assert(result);
1043
1044 pa_sink_ref(s);
1045
1046 pa_assert(!s->thread_info.rewind_requested);
1047 pa_assert(s->thread_info.rewind_nbytes == 0);
1048
1049 pa_assert(length > 0);
1050
1051 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
1052
1053 if (n == 0) {
1054 pa_silence_memchunk_get(&s->core->silence_cache,
1055 s->core->mempool,
1056 result,
1057 &s->sample_spec,
1058 length1st);
1059 } else if (n == 1) {
1060 pa_cvolume volume;
1061
1062 *result = info[0].chunk;
1063 pa_memblock_ref(result->memblock);
1064
1065 if (result->length > length)
1066 result->length = length;
1067
1068 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1069
1070 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
1071 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1072 pa_memblock_unref(result->memblock);
1073 pa_silence_memchunk_get(&s->core->silence_cache,
1074 s->core->mempool,
1075 result,
1076 &s->sample_spec,
1077 result->length);
1078 } else {
1079 pa_memchunk_make_writable(result, length);
1080 pa_volume_memchunk(result, &s->sample_spec, &volume);
1081 }
1082 }
1083 } else {
1084 void *ptr;
1085
1086 result->index = 0;
1087 result->memblock = pa_memblock_new(s->core->mempool, length);
1088
1089 ptr = pa_memblock_acquire(result->memblock);
1090
1091 result->length = pa_mix(info, n,
1092 (uint8_t*) ptr + result->index, length1st,
1093 &s->sample_spec,
1094 &s->thread_info.soft_volume,
1095 s->thread_info.soft_muted);
1096
1097 pa_memblock_release(result->memblock);
1098 }
1099
1100 inputs_drop(s, info, n, result);
1101
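/* fill_mix_info() may have limited the first pass to the shortest input
 * chunk, so keep rendering into the tail of the result until the full
 * requested length has been produced. */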
1102 if (result->length < length) {
1103 pa_memchunk chunk;
1104 size_t l, d;
1105 pa_memchunk_make_writable(result, length);
1106
1107 l = length - result->length;
1108 d = result->index + result->length;
1109 while (l > 0) {
1110 chunk = *result;
1111 chunk.index = d;
1112 chunk.length = l;
1113
1114 pa_sink_render_into(s, &chunk);
1115
1116 d += chunk.length;
1117 l -= chunk.length;
1118 }
1119 result->length = length;
1120 }
1121
1122 pa_sink_unref(s);
1123 }
1124
1125 /* Called from main thread */
1126 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1127 pa_usec_t usec = 0;
1128
1129 pa_sink_assert_ref(s);
1130 pa_assert(PA_SINK_IS_LINKED(s->state));
1131
1132 /* The returned value is supposed to be in the time domain of the sound card! */
1133
1134 if (s->state == PA_SINK_SUSPENDED)
1135 return 0;
1136
1137 if (!(s->flags & PA_SINK_LATENCY))
1138 return 0;
1139
1140 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1141
1142 return usec;
1143 }
1144
1145 /* Called from IO thread */
1146 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1147 pa_usec_t usec = 0;
1148 pa_msgobject *o;
1149
1150 pa_sink_assert_ref(s);
1151 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1152
1153 /* The returned value is supposed to be in the time domain of the sound card! */
1154
1155 if (s->thread_info.state == PA_SINK_SUSPENDED)
1156 return 0;
1157
1158 if (!(s->flags & PA_SINK_LATENCY))
1159 return 0;
1160
1161 o = PA_MSGOBJECT(s);
1162
1163 /* We probably should make this a proper vtable callback instead of going through process_msg() */
1164
1165 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1166 return -1;
1167
1168 return usec;
1169 }
1170
1171 static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
1172 unsigned c;
1173
1174 pa_sink_input_assert_ref(i);
1175 pa_assert(new_volume->channels == i->sample_spec.channels);
1176
1177 /*
1178 * This basically calculates:
1179 *
1180 * i->relative_volume := i->virtual_volume / new_volume
1181 * i->soft_volume := i->relative_volume * i->volume_factor
1182 */
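/* For example (sketch, using linear factors): if the input's virtual volume
 * corresponds to a linear factor of 0.5 and the new (remapped) sink volume to
 * 1.0, then relative_volume becomes 0.5 and, with a volume_factor of 1.0, the
 * resulting soft volume is pa_sw_volume_from_linear(0.5). */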
1183
1184 /* The new sink volume passed in here must already be remapped to
1185 * the sink input's channel map! */
1186
1187 i->soft_volume.channels = i->sample_spec.channels;
1188
1189 for (c = 0; c < i->sample_spec.channels; c++)
1190
1191 if (new_volume->values[c] <= PA_VOLUME_MUTED)
1192 /* We leave i->relative_volume untouched */
1193 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1194 else {
1195 i->relative_volume[c] =
1196 pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
1197 pa_sw_volume_to_linear(new_volume->values[c]);
1198
1199 i->soft_volume.values[c] = pa_sw_volume_from_linear(
1200 i->relative_volume[c] *
1201 pa_sw_volume_to_linear(i->volume_factor.values[c]));
1202 }
1203
1204 /* Hooks have the ability to play games with i->soft_volume */
1205 pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);
1206
1207 /* We don't copy the soft_volume to the thread_info data
1208 * here. That must be done by the caller */
1209 }
1210
1211 /* Called from main thread */
1212 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1213 pa_sink_input *i;
1214 uint32_t idx;
1215
1216 pa_sink_assert_ref(s);
1217 pa_assert(new_volume);
1218 pa_assert(PA_SINK_IS_LINKED(s->state));
1219 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1220
1221 /* This is called whenever a sink input volume changes or a sink
1222 * input is added/removed and we might need to fix up the sink
1223 * volume accordingly. Please note that we don't actually update
1224 * the sink's volume here, we only return how it needs to be
1225 * updated. The caller should then call pa_sink_set_volume(). */
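/* Typical caller pattern (sketch): compute the required sink volume first and
 * then apply it, e.g.
 *
 *   pa_cvolume new_volume;
 *   pa_sink_update_flat_volume(s, &new_volume);
 *   pa_sink_set_volume(s, &new_volume, FALSE, TRUE, TRUE, TRUE);
 *
 * where the exact pa_sink_set_volume() boolean arguments depend on whether
 * the change should propagate, be sent to the IO thread, become the new
 * reference volume and be saved. */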
1226
1227 if (pa_idxset_isempty(s->inputs)) {
1228 /* In the special case that we have no sink input we leave the
1229 * volume unmodified. */
1230 *new_volume = s->reference_volume;
1231 return;
1232 }
1233
1234 pa_cvolume_mute(new_volume, s->channel_map.channels);
1235
1236 /* First let's determine the new maximum volume of all inputs
1237 * connected to this sink */
1238 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1239 unsigned c;
1240 pa_cvolume remapped_volume;
1241
1242 remapped_volume = i->virtual_volume;
1243 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1244
1245 for (c = 0; c < new_volume->channels; c++)
1246 if (remapped_volume.values[c] > new_volume->values[c])
1247 new_volume->values[c] = remapped_volume.values[c];
1248 }
1249
1250 /* Then, let's update the soft volumes of all inputs connected
1251 * to this sink */
1252 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1253 pa_cvolume remapped_new_volume;
1254
1255 remapped_new_volume = *new_volume;
1256 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1257 compute_new_soft_volume(i, &remapped_new_volume);
1258
1259 /* We don't copy soft_volume to the thread_info data here
1260 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1261 * want the update to happen atomically with the sink volume
1262 * update, hence we do it within the pa_sink_set_volume() call
1263 * below */
1264 }
1265 }
1266
1267 /* Called from main thread */
1268 void pa_sink_propagate_flat_volume(pa_sink *s) {
1269 pa_sink_input *i;
1270 uint32_t idx;
1271
1272 pa_sink_assert_ref(s);
1273 pa_assert(PA_SINK_IS_LINKED(s->state));
1274 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1275
1276 /* This is called whenever the sink volume changes in a way that is
1277 * not caused by a sink input volume change. We need to fix up the
1278 * sink input volumes accordingly */
1279
1280 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1281 pa_cvolume sink_volume, new_virtual_volume;
1282 unsigned c;
1283
1284 /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */
1285
1286 sink_volume = s->virtual_volume;
1287 pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);
1288
1289 for (c = 0; c < i->sample_spec.channels; c++)
1290 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1291 i->relative_volume[c] *
1292 pa_sw_volume_to_linear(sink_volume.values[c]));
1293
1294 new_virtual_volume.channels = i->sample_spec.channels;
1295
1296 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1297 i->virtual_volume = new_virtual_volume;
1298
1299 /* Hmm, the soft volume might no longer actually match
1300 * what has been chosen as new virtual volume here,
1301 * especially when the old volume was
1302 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1303 * volumes here. */
1304 compute_new_soft_volume(i, &sink_volume);
1305
1306 /* The virtual volume changed, let's tell people so */
1307 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1308 }
1309 }
1310
1311 /* If the soft_volume of any of the sink inputs got changed, let's
1312 * make sure the thread copies are synced up. */
1313 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
1314 }
1315
1316 /* Called from main thread */
1317 void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference, pa_bool_t save) {
1318 pa_bool_t virtual_volume_changed;
1319
1320 pa_sink_assert_ref(s);
1321 pa_assert(PA_SINK_IS_LINKED(s->state));
1322 pa_assert(volume);
1323 pa_assert(pa_cvolume_valid(volume));
1324 pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));
1325
1326 virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
1327 s->virtual_volume = *volume;
1328 s->save_volume = (!virtual_volume_changed && s->save_volume) || save;
1329
1330 if (become_reference)
1331 s->reference_volume = s->virtual_volume;
1332
1333 /* Propagate this volume change back to the inputs */
1334 if (virtual_volume_changed)
1335 if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
1336 pa_sink_propagate_flat_volume(s);
1337
1338 if (s->set_volume) {
1339 /* If we have a function set_volume(), then we do not apply a
1340 * soft volume by default. However, set_volume() is free to
1341 * apply one to s->soft_volume */
1342
1343 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1344 s->set_volume(s);
1345
1346 } else
1347 /* If we have no function set_volume(), then the soft volume
1348 * becomes the virtual volume */
1349 s->soft_volume = s->virtual_volume;
1350
1351 /* This tells the sink that soft and/or virtual volume changed */
1352 if (sendmsg)
1353 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1354
1355 if (virtual_volume_changed)
1356 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1357 }
1358
1359 /* Called from main thread. Only to be called by sink implementor */
1360 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1361 pa_sink_assert_ref(s);
1362 pa_assert(volume);
1363
1364 s->soft_volume = *volume;
1365
1366 if (PA_SINK_IS_LINKED(s->state))
1367 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1368 else
1369 s->thread_info.soft_volume = *volume;
1370 }
1371
1372 /* Called from main thread */
1373 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
1374 pa_sink_assert_ref(s);
1375
1376 if (s->refresh_volume || force_refresh) {
1377 struct pa_cvolume old_virtual_volume = s->virtual_volume;
1378
1379 if (s->get_volume)
1380 s->get_volume(s);
1381
1382 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1383
1384 if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {
1385
1386 s->reference_volume = s->virtual_volume;
1387
1388 if (s->flags & PA_SINK_FLAT_VOLUME)
1389 pa_sink_propagate_flat_volume(s);
1390
1391 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1392 }
1393 }
1394
1395 return reference ? &s->reference_volume : &s->virtual_volume;
1396 }
1397
1398 /* Called from main thread */
1399 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume, pa_bool_t save) {
1400 pa_sink_assert_ref(s);
1401
1402 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1403 if (pa_cvolume_equal(&s->virtual_volume, new_volume)) {
1404 s->save_volume = s->save_volume || save;
1405 return;
1406 }
1407
1408 s->reference_volume = s->virtual_volume = *new_volume;
1409 s->save_volume = save;
1410
1411 if (s->flags & PA_SINK_FLAT_VOLUME)
1412 pa_sink_propagate_flat_volume(s);
1413
1414 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1415 }
1416
1417 /* Called from main thread */
1418 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1419 pa_bool_t old_muted;
1420
1421 pa_sink_assert_ref(s);
1422 pa_assert(PA_SINK_IS_LINKED(s->state));
1423
1424 old_muted = s->muted;
1425 s->muted = mute;
1426 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1427
1428 if (s->set_mute)
1429 s->set_mute(s);
1430
1431 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1432
1433 if (old_muted != s->muted)
1434 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1435 }
1436
1437 /* Called from main thread */
1438 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1439
1440 pa_sink_assert_ref(s);
1441
1442 if (s->refresh_muted || force_refresh) {
1443 pa_bool_t old_muted = s->muted;
1444
1445 if (s->get_mute)
1446 s->get_mute(s);
1447
1448 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1449
1450 if (old_muted != s->muted) {
1451 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1452
1453 /* Make sure the soft mute status stays in sync */
1454 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1455 }
1456 }
1457
1458 return s->muted;
1459 }
1460
1461 /* Called from main thread */
1462 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted, pa_bool_t save) {
1463 pa_sink_assert_ref(s);
1464
1465 /* The sink implementor may call this if the mute status changed to make sure everyone is notified */
1466
1467 if (s->muted == new_muted) {
1468 s->save_muted = s->save_muted || save;
1469 return;
1470 }
1471
1472 s->muted = new_muted;
1473 s->save_muted = save;
1474
1475 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1476 }
1477
1478 /* Called from main thread */
1479 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1480 pa_sink_assert_ref(s);
1481
1482 if (p)
1483 pa_proplist_update(s->proplist, mode, p);
1484
1485 if (PA_SINK_IS_LINKED(s->state)) {
1486 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1487 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1488 }
1489
1490 return TRUE;
1491 }
1492
1493 /* Called from main thread */
1494 void pa_sink_set_description(pa_sink *s, const char *description) {
1495 const char *old;
1496 pa_sink_assert_ref(s);
1497
1498 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1499 return;
1500
1501 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1502
1503 if (old && description && !strcmp(old, description))
1504 return;
1505
1506 if (description)
1507 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1508 else
1509 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1510
1511 if (s->monitor_source) {
1512 char *n;
1513
1514 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1515 pa_source_set_description(s->monitor_source, n);
1516 pa_xfree(n);
1517 }
1518
1519 if (PA_SINK_IS_LINKED(s->state)) {
1520 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1521 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1522 }
1523 }
1524
1525 /* Called from main thread */
1526 unsigned pa_sink_linked_by(pa_sink *s) {
1527 unsigned ret;
1528
1529 pa_sink_assert_ref(s);
1530 pa_assert(PA_SINK_IS_LINKED(s->state));
1531
1532 ret = pa_idxset_size(s->inputs);
1533
1534 /* We add in the number of streams connected to us here. Please
1535 * note the asymmetry to pa_sink_used_by()! */
1536
1537 if (s->monitor_source)
1538 ret += pa_source_linked_by(s->monitor_source);
1539
1540 return ret;
1541 }
1542
1543 /* Called from main thread */
1544 unsigned pa_sink_used_by(pa_sink *s) {
1545 unsigned ret;
1546
1547 pa_sink_assert_ref(s);
1548 pa_assert(PA_SINK_IS_LINKED(s->state));
1549
1550 ret = pa_idxset_size(s->inputs);
1551 pa_assert(ret >= s->n_corked);
1552
1553 /* Streams connected to our monitor source do not matter for
1554 * pa_sink_used_by()! */
1555
1556 return ret - s->n_corked;
1557 }
1558
1559 /* Called from main thread */
1560 unsigned pa_sink_check_suspend(pa_sink *s) {
1561 unsigned ret;
1562 pa_sink_input *i;
1563 uint32_t idx;
1564
1565 pa_sink_assert_ref(s);
1566
1567 if (!PA_SINK_IS_LINKED(s->state))
1568 return 0;
1569
1570 ret = 0;
1571
1572 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1573 pa_sink_input_state_t st;
1574
1575 st = pa_sink_input_get_state(i);
1576 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1577
1578 if (st == PA_SINK_INPUT_CORKED)
1579 continue;
1580
1581 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1582 continue;
1583
1584 ret ++;
1585 }
1586
1587 if (s->monitor_source)
1588 ret += pa_source_check_suspend(s->monitor_source);
1589
1590 return ret;
1591 }
1592
1593 /* Called from the IO thread */
1594 static void sync_input_volumes_within_thread(pa_sink *s) {
1595 pa_sink_input *i;
1596 void *state = NULL;
1597
1598 pa_sink_assert_ref(s);
1599
1600 while ((i = PA_SINK_INPUT(pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))) {
1601 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1602 continue;
1603
1604 i->thread_info.soft_volume = i->soft_volume;
1605 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1606 }
1607 }
1608
1609 /* Called from IO thread, except when it is not */
1610 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1611 pa_sink *s = PA_SINK(o);
1612 pa_sink_assert_ref(s);
1613
1614 switch ((pa_sink_message_t) code) {
1615
1616 case PA_SINK_MESSAGE_ADD_INPUT: {
1617 pa_sink_input *i = PA_SINK_INPUT(userdata);
1618
1619 /* If you change anything here, make sure to change the
1620 * sink input handling a few lines down at
1621 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1622
1623 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1624
1625 /* Since the caller sleeps in pa_sink_input_put(), we can
1626 * safely access data outside of thread_info even though
1627 * it is mutable */
1628
1629 if ((i->thread_info.sync_prev = i->sync_prev)) {
1630 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1631 pa_assert(i->sync_prev->sync_next == i);
1632 i->thread_info.sync_prev->thread_info.sync_next = i;
1633 }
1634
1635 if ((i->thread_info.sync_next = i->sync_next)) {
1636 pa_assert(i->sink == i->thread_info.sync_next->sink);
1637 pa_assert(i->sync_next->sync_prev == i);
1638 i->thread_info.sync_next->thread_info.sync_prev = i;
1639 }
1640
1641 pa_assert(!i->thread_info.attached);
1642 i->thread_info.attached = TRUE;
1643
1644 if (i->attach)
1645 i->attach(i);
1646
1647 pa_sink_input_set_state_within_thread(i, i->state);
1648
1649 /* The requested latency of the sink input needs to be
1650 * fixed up and then configured on the sink */
1651
1652 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1653 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1654
1655 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1656 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1657
1658 /* We don't rewind here automatically. This is left to the
1659 * sink input implementor because some sink inputs need a
1660 * slow start, i.e. need some time to buffer client
1661 * samples before beginning streaming. */
1662
1663 /* In flat volume mode we need to update the volume as
1664 * well */
1665 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1666 }
1667
1668 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1669 pa_sink_input *i = PA_SINK_INPUT(userdata);
1670
1671 /* If you change anything here, make sure to change the
1672 * sink input handling a few lines down at
1673 * PA_SINK_MESSAGE_START_MOVE, too. */
1674
1675 if (i->detach)
1676 i->detach(i);
1677
1678 pa_sink_input_set_state_within_thread(i, i->state);
1679
1680 pa_assert(i->thread_info.attached);
1681 i->thread_info.attached = FALSE;
1682
1683 /* Since the caller sleeps in pa_sink_input_unlink(),
1684 * we can safely access data outside of thread_info even
1685 * though it is mutable */
1686
1687 pa_assert(!i->sync_prev);
1688 pa_assert(!i->sync_next);
1689
1690 if (i->thread_info.sync_prev) {
1691 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1692 i->thread_info.sync_prev = NULL;
1693 }
1694
1695 if (i->thread_info.sync_next) {
1696 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1697 i->thread_info.sync_next = NULL;
1698 }
1699
1700 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1701 pa_sink_input_unref(i);
1702
1703 pa_sink_invalidate_requested_latency(s);
1704 pa_sink_request_rewind(s, (size_t) -1);
1705
1706 /* In flat volume mode we need to update the volume as
1707 * well */
1708 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1709 }
1710
1711 case PA_SINK_MESSAGE_START_MOVE: {
1712 pa_sink_input *i = PA_SINK_INPUT(userdata);
1713
1714 /* We don't support moving synchronized streams. */
1715 pa_assert(!i->sync_prev);
1716 pa_assert(!i->sync_next);
1717 pa_assert(!i->thread_info.sync_next);
1718 pa_assert(!i->thread_info.sync_prev);
1719
1720 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1721 pa_usec_t usec = 0;
1722 size_t sink_nbytes, total_nbytes;
1723
1724 /* Get the latency of the sink */
1725 if (!(s->flags & PA_SINK_LATENCY) ||
1726 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1727 usec = 0;
1728
1729 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1730 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1731
1732 if (total_nbytes > 0) {
1733 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1734 i->thread_info.rewrite_flush = TRUE;
1735 pa_sink_input_process_rewind(i, sink_nbytes);
1736 }
1737 }
1738
1739 if (i->detach)
1740 i->detach(i);
1741
1742 pa_assert(i->thread_info.attached);
1743 i->thread_info.attached = FALSE;
1744
1745 /* Let's remove the sink input ...*/
1746 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1747 pa_sink_input_unref(i);
1748
1749 pa_sink_invalidate_requested_latency(s);
1750
1751 pa_log_debug("Requesting rewind due to started move");
1752 pa_sink_request_rewind(s, (size_t) -1);
1753
1754 /* In flat volume mode we need to update the volume as
1755 * well */
1756 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1757 }
1758
1759 case PA_SINK_MESSAGE_FINISH_MOVE: {
1760 pa_sink_input *i = PA_SINK_INPUT(userdata);
1761
1762 /* We don't support moving synchronized streams. */
1763 pa_assert(!i->sync_prev);
1764 pa_assert(!i->sync_next);
1765 pa_assert(!i->thread_info.sync_next);
1766 pa_assert(!i->thread_info.sync_prev);
1767
1768 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1769
1770 pa_assert(!i->thread_info.attached);
1771 i->thread_info.attached = TRUE;
1772
1773 if (i->attach)
1774 i->attach(i);
1775
1776 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1777 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1778
1779 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1780 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1781
1782 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1783 pa_usec_t usec = 0;
1784 size_t nbytes;
1785
1786 /* Get the latency of the sink */
1787 if (!(s->flags & PA_SINK_LATENCY) ||
1788 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1789 usec = 0;
1790
1791 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1792
1793 if (nbytes > 0)
1794 pa_sink_input_drop(i, nbytes);
1795
1796 pa_log_debug("Requesting rewind due to finished move");
1797 pa_sink_request_rewind(s, nbytes);
1798 }
1799
1800 /* In flat volume mode we need to update the volume as
1801 * well */
1802 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1803 }
1804
1805 case PA_SINK_MESSAGE_SET_VOLUME:
1806
1807 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1808 s->thread_info.soft_volume = s->soft_volume;
1809 pa_sink_request_rewind(s, (size_t) -1);
1810 }
1811
1812 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1813 return 0;
1814
1815 /* Fall through ... */
1816
1817 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1818 sync_input_volumes_within_thread(s);
1819 return 0;
1820
1821 case PA_SINK_MESSAGE_GET_VOLUME:
1822 return 0;
1823
1824 case PA_SINK_MESSAGE_SET_MUTE:
1825
1826 if (s->thread_info.soft_muted != s->muted) {
1827 s->thread_info.soft_muted = s->muted;
1828 pa_sink_request_rewind(s, (size_t) -1);
1829 }
1830
1831 return 0;
1832
1833 case PA_SINK_MESSAGE_GET_MUTE:
1834 return 0;
1835
1836 case PA_SINK_MESSAGE_SET_STATE: {
1837
1838 pa_bool_t suspend_change =
1839 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1840 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1841
1842 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1843
1844 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1845 s->thread_info.rewind_nbytes = 0;
1846 s->thread_info.rewind_requested = FALSE;
1847 }
1848
1849 if (suspend_change) {
1850 pa_sink_input *i;
1851 void *state = NULL;
1852
1853 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1854 if (i->suspend_within_thread)
1855 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1856 }
1857
1858 return 0;
1859 }
1860
1861 case PA_SINK_MESSAGE_DETACH:
1862
1863 /* Detach all streams */
1864 pa_sink_detach_within_thread(s);
1865 return 0;
1866
1867 case PA_SINK_MESSAGE_ATTACH:
1868
1869 /* Reattach all streams */
1870 pa_sink_attach_within_thread(s);
1871 return 0;
1872
1873 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
1874
1875 pa_usec_t *usec = userdata;
1876 *usec = pa_sink_get_requested_latency_within_thread(s);
1877
1878 if (*usec == (pa_usec_t) -1)
1879 *usec = s->thread_info.max_latency;
1880
1881 return 0;
1882 }
1883
1884 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
1885 pa_usec_t *r = userdata;
1886
1887 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
1888
1889 return 0;
1890 }
1891
1892 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
1893 pa_usec_t *r = userdata;
1894
1895 r[0] = s->thread_info.min_latency;
1896 r[1] = s->thread_info.max_latency;
1897
1898 return 0;
1899 }
1900
1901 case PA_SINK_MESSAGE_GET_MAX_REWIND:
1902
1903 *((size_t*) userdata) = s->thread_info.max_rewind;
1904 return 0;
1905
1906 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
1907
1908 *((size_t*) userdata) = s->thread_info.max_request;
1909 return 0;
1910
1911 case PA_SINK_MESSAGE_SET_MAX_REWIND:
1912
1913 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
1914 return 0;
1915
1916 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
1917
1918 pa_sink_set_max_request_within_thread(s, (size_t) offset);
1919 return 0;
1920
1921 case PA_SINK_MESSAGE_GET_LATENCY:
1922 case PA_SINK_MESSAGE_MAX:
1923 ;
1924 }
1925
1926 return -1;
1927 }
1928
1929 /* Called from main thread */
1930 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
1931 pa_sink *sink;
1932 uint32_t idx;
1933 int ret = 0;
1934
1935 pa_core_assert_ref(c);
1936 pa_assert(cause != 0);
1937
1938 for (sink = PA_SINK(pa_idxset_first(c->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(c->sinks, &idx))) {
1939 int r;
1940
1941 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
1942 ret = r;
1943 }
1944
1945 return ret;
1946 }
1947
1948 /* Called from main thread */
1949 void pa_sink_detach(pa_sink *s) {
1950 pa_sink_assert_ref(s);
1951 pa_assert(PA_SINK_IS_LINKED(s->state));
1952
1953 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
1954 }
1955
1956 /* Called from main thread */
1957 void pa_sink_attach(pa_sink *s) {
1958 pa_sink_assert_ref(s);
1959 pa_assert(PA_SINK_IS_LINKED(s->state));
1960
1961 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
1962 }
1963
1964 /* Called from IO thread */
1965 void pa_sink_detach_within_thread(pa_sink *s) {
1966 pa_sink_input *i;
1967 void *state = NULL;
1968
1969 pa_sink_assert_ref(s);
1970 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1971
1972 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1973 if (i->detach)
1974 i->detach(i);
1975
1976 if (s->monitor_source)
1977 pa_source_detach_within_thread(s->monitor_source);
1978 }
1979
1980 /* Called from IO thread */
1981 void pa_sink_attach_within_thread(pa_sink *s) {
1982 pa_sink_input *i;
1983 void *state = NULL;
1984
1985 pa_sink_assert_ref(s);
1986 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1987
1988 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1989 if (i->attach)
1990 i->attach(i);
1991
1992 if (s->monitor_source)
1993 pa_source_attach_within_thread(s->monitor_source);
1994 }
1995
1996 /* Called from IO thread */
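/* Ask the sink to rewrite up to 'nbytes' of already rendered data. Requests
 * are ignored while the sink is suspended, clamped to max_rewind, and
 * dropped if a rewind of at least that size is already pending; otherwise
 * the implementor's request_rewind() callback is invoked. */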
1997 void pa_sink_request_rewind(pa_sink *s, size_t nbytes) {
1998 pa_sink_assert_ref(s);
1999 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2000
2001 if (s->thread_info.state == PA_SINK_SUSPENDED)
2002 return;
2003
2004 if (nbytes == (size_t) -1)
2005 nbytes = s->thread_info.max_rewind;
2006
2007 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2008
2009 if (s->thread_info.rewind_requested &&
2010 nbytes <= s->thread_info.rewind_nbytes)
2011 return;
2012
2013 s->thread_info.rewind_nbytes = nbytes;
2014 s->thread_info.rewind_requested = TRUE;
2015
2016 if (s->request_rewind)
2017 s->request_rewind(s);
2018 }
2019
2020 /* Called from IO thread */
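/* Returns the smallest latency requested by any stream or by the monitor
 * source, clamped to the configured latency range, or (pa_usec_t) -1 if
 * nobody requested anything. Sinks without dynamic latency simply report
 * their fixed latency. The result is cached while the sink is linked. */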
2021 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2022 pa_usec_t result = (pa_usec_t) -1;
2023 pa_sink_input *i;
2024 void *state = NULL;
2025 pa_usec_t monitor_latency;
2026
2027 pa_sink_assert_ref(s);
2028
2029 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2030 return PA_CLAMP(s->fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2031
2032 if (s->thread_info.requested_latency_valid)
2033 return s->thread_info.requested_latency;
2034
2035 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2036
2037 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2038 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2039 result = i->thread_info.requested_sink_latency;
2040
2041 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2042
2043 if (monitor_latency != (pa_usec_t) -1 &&
2044 (result == (pa_usec_t) -1 || result > monitor_latency))
2045 result = monitor_latency;
2046
2047 if (result != (pa_usec_t) -1)
2048 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2049
2050 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2051 /* Only cache if properly initialized */
2052 s->thread_info.requested_latency = result;
2053 s->thread_info.requested_latency_valid = TRUE;
2054 }
2055
2056 return result;
2057 }
2058
2059 /* Called from main thread */
2060 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2061 pa_usec_t usec = 0;
2062
2063 pa_sink_assert_ref(s);
2064 pa_assert(PA_SINK_IS_LINKED(s->state));
2065
2066 if (s->state == PA_SINK_SUSPENDED)
2067 return 0;
2068
2069 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2070 return usec;
2071 }
2072
2073 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2074 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2075 pa_sink_input *i;
2076 void *state = NULL;
2077
2078 pa_sink_assert_ref(s);
2079
2080 if (max_rewind == s->thread_info.max_rewind)
2081 return;
2082
2083 s->thread_info.max_rewind = max_rewind;
2084
2085 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2086 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2087 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2088 }
2089
2090 if (s->monitor_source)
2091 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2092 }
2093
2094 /* Called from main thread */
2095 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2096 pa_sink_assert_ref(s);
2097
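    /* Once the sink is linked, thread_info may only be touched from the IO
     * thread, so forward the new value through the message queue; before
     * that we can still update it directly. */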
2098 if (PA_SINK_IS_LINKED(s->state))
2099 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2100 else
2101 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2102 }
2103
2104 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2105 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2106 void *state = NULL;
2107
2108 pa_sink_assert_ref(s);
2109
2110 if (max_request == s->thread_info.max_request)
2111 return;
2112
2113 s->thread_info.max_request = max_request;
2114
2115 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2116 pa_sink_input *i;
2117
2118 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2119 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2120 }
2121 }
2122
2123 /* Called from main thread */
2124 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2125 pa_sink_assert_ref(s);
2126
2127 if (PA_SINK_IS_LINKED(s->state))
2128 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2129 else
2130 pa_sink_set_max_request_within_thread(s, max_request);
2131 }
2132
2133 /* Called from IO thread */
2134 void pa_sink_invalidate_requested_latency(pa_sink *s) {
2135 pa_sink_input *i;
2136 void *state = NULL;
2137
2138 pa_sink_assert_ref(s);
2139
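    /* Sinks without dynamic latency ignore latency requests entirely, so
     * there is nothing to invalidate for them. */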
2140 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2141 return;
2142
2143 s->thread_info.requested_latency_valid = FALSE;
2144
2145 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2146
2147 if (s->update_requested_latency)
2148 s->update_requested_latency(s);
2149
2150 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2151 if (i->update_sink_requested_latency)
2152 i->update_sink_requested_latency(i);
2153 }
2154 }
2155
2156 /* Called from main thread */
2157 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2158 pa_sink_assert_ref(s);
2159
2160     /* min_latency == 0:        no limit
2161      * min_latency != 0:        use the specified limit
2162      *
2163      * The same applies to max_latency */
2164
2165 if (min_latency < ABSOLUTE_MIN_LATENCY)
2166 min_latency = ABSOLUTE_MIN_LATENCY;
2167
2168 if (max_latency <= 0 ||
2169 max_latency > ABSOLUTE_MAX_LATENCY)
2170 max_latency = ABSOLUTE_MAX_LATENCY;
2171
2172 pa_assert(min_latency <= max_latency);
2173
2174 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2175 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2176 max_latency == ABSOLUTE_MAX_LATENCY) ||
2177 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2178
2179 if (PA_SINK_IS_LINKED(s->state)) {
2180 pa_usec_t r[2];
2181
2182 r[0] = min_latency;
2183 r[1] = max_latency;
2184
2185 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2186 } else
2187 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2188 }
2189
2190 /* Called from main thread */
2191 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2192 pa_sink_assert_ref(s);
2193 pa_assert(min_latency);
2194 pa_assert(max_latency);
2195
2196 if (PA_SINK_IS_LINKED(s->state)) {
2197 pa_usec_t r[2] = { 0, 0 };
2198
2199 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2200
2201 *min_latency = r[0];
2202 *max_latency = r[1];
2203 } else {
2204 *min_latency = s->thread_info.min_latency;
2205 *max_latency = s->thread_info.max_latency;
2206 }
2207 }
2208
2209 /* Called from IO thread */
2210 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2211 void *state = NULL;
2212
2213 pa_sink_assert_ref(s);
2214
2215 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2216 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2217 pa_assert(min_latency <= max_latency);
2218
2219 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2220 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2221 max_latency == ABSOLUTE_MAX_LATENCY) ||
2222 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2223
2224 s->thread_info.min_latency = min_latency;
2225 s->thread_info.max_latency = max_latency;
2226
2227 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2228 pa_sink_input *i;
2229
2230 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2231 if (i->update_sink_latency_range)
2232 i->update_sink_latency_range(i);
2233 }
2234
2235 pa_sink_invalidate_requested_latency(s);
2236
2237 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2238 }
2239
2240 /* Called from main thread, before the sink is put */
2241 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2242 pa_sink_assert_ref(s);
2243
2244 pa_assert(pa_sink_get_state(s) == PA_SINK_INIT);
2245
2246 if (latency < ABSOLUTE_MIN_LATENCY)
2247 latency = ABSOLUTE_MIN_LATENCY;
2248
2249 if (latency > ABSOLUTE_MAX_LATENCY)
2250 latency = ABSOLUTE_MAX_LATENCY;
2251
2252 s->fixed_latency = latency;
2253 pa_source_set_fixed_latency(s->monitor_source, latency);
2254 }
2255
2256 /* Called from main context */
2257 size_t pa_sink_get_max_rewind(pa_sink *s) {
2258 size_t r;
2259 pa_sink_assert_ref(s);
2260
2261 if (!PA_SINK_IS_LINKED(s->state))
2262 return s->thread_info.max_rewind;
2263
2264 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2265
2266 return r;
2267 }
2268
2269 /* Called from main context */
2270 size_t pa_sink_get_max_request(pa_sink *s) {
2271 size_t r;
2272 pa_sink_assert_ref(s);
2273
2274 if (!PA_SINK_IS_LINKED(s->state))
2275 return s->thread_info.max_request;
2276
2277 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2278
2279 return r;
2280 }
2281
2282 /* Called from main context */
2283 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2284 pa_device_port *port;
2285
2286 pa_assert(s);
2287
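    /* Fail cleanly if the implementor does not support switching ports or
     * if the requested port does not exist; if the port is already active
     * we only need to remember whether the choice should be saved. */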
2288 if (!s->set_port) {
2289 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2290 return -PA_ERR_NOTIMPLEMENTED;
2291 }
2292
2293 if (!s->ports)
2294 return -PA_ERR_NOENTITY;
2295
2296 if (!(port = pa_hashmap_get(s->ports, name)))
2297 return -PA_ERR_NOENTITY;
2298
2299 if (s->active_port == port) {
2300 s->save_port = s->save_port || save;
2301 return 0;
2302 }
2303
2304 if ((s->set_port(s, port)) < 0)
2305 return -PA_ERR_NOENTITY;
2306
2307 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2308
2309 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2310
2311 s->active_port = port;
2312 s->save_port = save;
2313
2314 return 0;
2315 }
2316
2317 /* Called from main context */
2318 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2319 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2320
2321 pa_assert(p);
2322
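    /* Pick a base icon name from the form factor (falling back to the device
     * class and finally to a generic default), then append a profile based
     * suffix such as "-analog" and a "-<bus>" suffix when those properties
     * are set. */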
2323 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2324 return TRUE;
2325
2326 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2327
2328 if (pa_streq(ff, "microphone"))
2329 t = "audio-input-microphone";
2330 else if (pa_streq(ff, "webcam"))
2331 t = "camera-web";
2332 else if (pa_streq(ff, "computer"))
2333 t = "computer";
2334 else if (pa_streq(ff, "handset"))
2335 t = "phone";
2336 else if (pa_streq(ff, "portable"))
2337 t = "multimedia-player";
2338 else if (pa_streq(ff, "tv"))
2339 t = "video-display";
2340
2341                 /*
2342                  * The following icons are not (yet) part of the icon naming
2343                  * spec; see the discussion at:
2344                  *
2345                  * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2346                  */
2347 else if (pa_streq(ff, "headset"))
2348 t = "audio-headset";
2349 else if (pa_streq(ff, "headphone"))
2350 t = "audio-headphones";
2351 else if (pa_streq(ff, "speaker"))
2352 t = "audio-speakers";
2353 else if (pa_streq(ff, "hands-free"))
2354 t = "audio-handsfree";
2355 }
2356
2357 if (!t)
2358 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2359 if (pa_streq(c, "modem"))
2360 t = "modem";
2361
2362 if (!t) {
2363 if (is_sink)
2364 t = "audio-card";
2365 else
2366 t = "audio-input-microphone";
2367 }
2368
2369 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2370 if (strstr(profile, "analog"))
2371 s = "-analog";
2372 else if (strstr(profile, "iec958"))
2373 s = "-iec958";
2374 else if (strstr(profile, "hdmi"))
2375 s = "-hdmi";
2376 }
2377
2378 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2379
2380 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2381
2382 return TRUE;
2383 }
2384
2385 pa_bool_t pa_device_init_description(pa_proplist *p) {
2386 const char *s;
2387 pa_assert(p);
2388
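    /* Derive a human readable description, preferring a well known form
     * factor or device class over the raw product name. */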
2389 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2390 return TRUE;
2391
2392 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2393 if (pa_streq(s, "internal")) {
2394 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Internal Audio"));
2395 return TRUE;
2396 }
2397
2398 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2399 if (pa_streq(s, "modem")) {
2400 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Modem"));
2401 return TRUE;
2402 }
2403
2404 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME))) {
2405 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, s);
2406 return TRUE;
2407 }
2408
2409 return FALSE;
2410 }
2411
2412 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2413 const char *s;
2414 pa_assert(p);
2415
2416 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2417 return TRUE;
2418
2419 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2420 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2421 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2422 return TRUE;
2423 }
2424
2425 return FALSE;
2426 }