/*
 * Scrape residue from code browser (code.delx.au — pulseaudio/blob — src/pulsecore/sink.c):
 * core: split of FAIL_ON_SUSPEND into KILL_ON_SUSPEND and NO_CREATE_ON_SUSPEND
 * [pulseaudio] / src / pulsecore / sink.c
 */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
54
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 pa_zero(*data);
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
149
/* Called from main context */
/* Detach every driver-supplied callback from the sink; used during
 * construction and on unlink so that no backend code can be invoked on a
 * half-constructed or dead sink. */
static void reset_callbacks(pa_sink *s) {
    pa_assert(s);

    s->set_state = NULL;
    s->get_volume = NULL;
    s->set_volume = NULL;
    s->get_mute = NULL;
    s->set_mute = NULL;
    s->request_rewind = NULL;
    s->update_requested_latency = NULL;
    s->set_port = NULL;
}
163
/* Called from main context */
/* Construct a new sink from the description in 'data': registers the name,
 * fires the NEW and FIXATE hooks, fills in defaults (channel map, volume,
 * mute), steals the port list, creates the companion monitor source and
 * enters the core's sink idxset. Returns NULL on failure.
 * NOTE(review): the pa_return_null_if_fail() exits below run after name
 * registration and leak both 's' and the registered name — see the FIXME. */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Default the channel map from the channel count if none was given */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    if (!data->volume_is_set)
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit card properties before filling in device metadata */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;

    s->reference_volume = s->virtual_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    /* Dynamic-latency sinks start with no fixed latency at all */
    s->fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;
    s->rtpoll = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    /* Prefer the explicitly requested port, if it exists */
    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* Otherwise pick the highest-priority port as the default */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    /* Shared cached silence block matching our sample spec */
    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;

    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Every sink gets a monitor source mirroring its output */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
356
/* Called from main context */
/* Transition the sink to 'state'. The backend's set_state() callback runs
 * first; the new state is then mirrored into the IO thread via a
 * synchronous SET_STATE message. If the IO thread rejects it, the backend
 * callback is rolled back to the original state. Fires state-change
 * hooks/events (unless entering UNLINKED), and on a suspend/resume edge
 * kills KILL_ON_SUSPEND inputs or notifies the others. Returns 0 on
 * success or a negative error from the backend/IO thread. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* TRUE iff this transition crosses the opened/suspended boundary */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* Roll the backend back; its return value is deliberately ignored */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
414
/* Called from main context */
/* Complete initialization of a sink created with pa_sink_new(): verifies
 * the driver filled in the mandatory fields, fixes up volume-related
 * flags, moves the sink from INIT to IDLE, puts the monitor source and
 * announces the new sink via subscription event and PUT hook. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->rtpoll);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow volume related flags to be set
     * between _new() and _put(). */

    /* Pure software volume is always applied in the dB domain */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
        s->flags |= PA_SINK_DECIBEL_VOLUME;

    if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
        s->flags |= PA_SINK_FLAT_VOLUME;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;

    /* Sanity-check the flag/field combinations the driver may have set */
    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    /* The monitor source must mirror our latency configuration */
    pa_assert(s->monitor_source->fixed_latency == s->fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
457
/* Called from main context */
/* Detach the sink from the core: fires UNLINK hooks, unregisters the name,
 * removes the sink from core/card idxsets, kills all remaining inputs,
 * transitions to UNLINKED and unlinks the monitor source. Idempotent. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    while ((i = pa_idxset_first(s->inputs, NULL))) {
        /* 'j' catches an input that survives pa_sink_input_kill(), which
         * would otherwise make this loop spin forever */
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
507
/* Called from main context */
/* pa_object destructor, invoked when the last reference is dropped.
 * Unlinks the sink if still linked, then releases the monitor source,
 * input containers, cached silence block, strings, proplist and ports. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);
    pa_sink_input *i;

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    pa_idxset_free(s->inputs, NULL, NULL);

    /* The IO-thread hashmap still holds references to its inputs */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
554
555 /* Called from main context */
556 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
557 pa_sink_assert_ref(s);
558 pa_assert_ctl_context();
559
560 s->asyncmsgq = q;
561
562 if (s->monitor_source)
563 pa_source_set_asyncmsgq(s->monitor_source, q);
564 }
565
566 /* Called from main context */
567 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
568 pa_sink_assert_ref(s);
569 pa_assert_ctl_context();
570
571 s->rtpoll = p;
572
573 if (s->monitor_source)
574 pa_source_set_rtpoll(s->monitor_source, p);
575 }
576
577 /* Called from main context */
578 int pa_sink_update_status(pa_sink*s) {
579 pa_sink_assert_ref(s);
580 pa_assert_ctl_context();
581 pa_assert(PA_SINK_IS_LINKED(s->state));
582
583 if (s->state == PA_SINK_SUSPENDED)
584 return 0;
585
586 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
587 }
588
/* Called from main context */
/* Add or remove 'cause' from the sink's suspend-cause bitmask (and the
 * monitor source's). The sink is suspended while any cause remains set
 * and resumed to RUNNING/IDLE once the mask is empty. Returns 0 if no
 * state change is needed, otherwise sink_set_state()'s result.
 * NOTE(review): s->monitor_source is dereferenced unconditionally — it is
 * created in pa_sink_new(), presumably always non-NULL here; confirm. */
int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (suspend) {
        s->suspend_cause |= cause;
        s->monitor_source->suspend_cause |= cause;
    } else {
        s->suspend_cause &= ~cause;
        s->monitor_source->suspend_cause &= ~cause;
    }

    /* Already in the state the mask demands? Then nothing to do. */
    if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return sink_set_state(s, PA_SINK_SUSPENDED);
    else
        return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
614
615 /* Called from main context */
616 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
617 pa_sink_input *i, *n;
618 uint32_t idx;
619
620 pa_sink_assert_ref(s);
621 pa_assert_ctl_context();
622 pa_assert(PA_SINK_IS_LINKED(s->state));
623
624 if (!q)
625 q = pa_queue_new();
626
627 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
628 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
629
630 pa_sink_input_ref(i);
631
632 if (pa_sink_input_start_move(i) >= 0)
633 pa_queue_push(q, i);
634 else
635 pa_sink_input_unref(i);
636 }
637
638 return q;
639 }
640
641 /* Called from main context */
642 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
643 pa_sink_input *i;
644
645 pa_sink_assert_ref(s);
646 pa_assert_ctl_context();
647 pa_assert(PA_SINK_IS_LINKED(s->state));
648 pa_assert(q);
649
650 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
651 if (pa_sink_input_finish_move(i, s, save) < 0)
652 pa_sink_input_fail_move(i);
653
654 pa_sink_input_unref(i);
655 }
656
657 pa_queue_free(q, NULL, NULL);
658 }
659
660 /* Called from main context */
661 void pa_sink_move_all_fail(pa_queue *q) {
662 pa_sink_input *i;
663
664 pa_assert_ctl_context();
665 pa_assert(q);
666
667 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
668 pa_sink_input_fail_move(i);
669 pa_sink_input_unref(i);
670 }
671
672 pa_queue_free(q, NULL, NULL);
673 }
674
675 /* Called from IO thread context */
676 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
677 pa_sink_input *i;
678 void *state = NULL;
679
680 pa_sink_assert_ref(s);
681 pa_sink_assert_io_context(s);
682 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
683
684 /* If nobody requested this and this is actually no real rewind
685 * then we can short cut this. Please note that this means that
686 * not all rewind requests triggered upstream will always be
687 * translated in actual requests! */
688 if (!s->thread_info.rewind_requested && nbytes <= 0)
689 return;
690
691 s->thread_info.rewind_nbytes = 0;
692 s->thread_info.rewind_requested = FALSE;
693
694 if (s->thread_info.state == PA_SINK_SUSPENDED)
695 return;
696
697 if (nbytes > 0)
698 pa_log_debug("Processing rewind...");
699
700 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
701 pa_sink_input_assert_ref(i);
702 pa_sink_input_process_rewind(i, nbytes);
703 }
704
705 if (nbytes > 0)
706 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
707 pa_source_process_rewind(s->monitor_source, nbytes);
708 }
709
/* Called from IO thread context */
/* Peek one chunk from each input (up to 'maxinfo' entries) into 'info'.
 * Chunks that are pure silence are skipped (their memblock is unreffed).
 * On return, *length is clamped to the shortest chunk seen — including
 * skipped silent ones — and the number of filled entries is returned.
 * Each filled entry holds a reference to its input in ->userdata, to be
 * released later by inputs_drop(). */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        /* Track the shortest chunk so all entries can be mixed uniformly */
        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
749
/* Called from IO thread context */
/* Post-mix bookkeeping: advance every input by result->length bytes,
 * feed per-input audio to any direct outputs on the monitor source,
 * release the input references and memblocks recorded in info[] by
 * fill_mix_info(), and finally post the mixed result to the monitor
 * source. 'n' is the number of valid info[] entries. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state = NULL;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry in the pa_mix_info array;
         * 'p' resumes where the previous search left off, so an unchanged
         * ordering costs O(1) per input. */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                /* Direct outputs get this input's own (volume-adjusted)
                 * audio rather than the full mix; silence if the input
                 * contributed nothing this round. */
                if (m && m->chunk.memblock) {
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release this entry's memblock and input reference */
        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
846
847 /* Called from IO thread context */
848 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
849 pa_mix_info info[MAX_MIX_CHANNELS];
850 unsigned n;
851 size_t block_size_max;
852
853 pa_sink_assert_ref(s);
854 pa_sink_assert_io_context(s);
855 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
856 pa_assert(pa_frame_aligned(length, &s->sample_spec));
857 pa_assert(result);
858
859 pa_sink_ref(s);
860
861 pa_assert(!s->thread_info.rewind_requested);
862 pa_assert(s->thread_info.rewind_nbytes == 0);
863
864 if (s->thread_info.state == PA_SINK_SUSPENDED) {
865 result->memblock = pa_memblock_ref(s->silence.memblock);
866 result->index = s->silence.index;
867 result->length = PA_MIN(s->silence.length, length);
868 return;
869 }
870
871 if (length <= 0)
872 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
873
874 block_size_max = pa_mempool_block_size_max(s->core->mempool);
875 if (length > block_size_max)
876 length = pa_frame_align(block_size_max, &s->sample_spec);
877
878 pa_assert(length > 0);
879
880 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
881
882 if (n == 0) {
883
884 *result = s->silence;
885 pa_memblock_ref(result->memblock);
886
887 if (result->length > length)
888 result->length = length;
889
890 } else if (n == 1) {
891 pa_cvolume volume;
892
893 *result = info[0].chunk;
894 pa_memblock_ref(result->memblock);
895
896 if (result->length > length)
897 result->length = length;
898
899 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
900
901 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
902 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
903 pa_memblock_unref(result->memblock);
904 pa_silence_memchunk_get(&s->core->silence_cache,
905 s->core->mempool,
906 result,
907 &s->sample_spec,
908 result->length);
909 } else {
910 pa_memchunk_make_writable(result, 0);
911 pa_volume_memchunk(result, &s->sample_spec, &volume);
912 }
913 }
914 } else {
915 void *ptr;
916 result->memblock = pa_memblock_new(s->core->mempool, length);
917
918 ptr = pa_memblock_acquire(result->memblock);
919 result->length = pa_mix(info, n,
920 ptr, length,
921 &s->sample_spec,
922 &s->thread_info.soft_volume,
923 s->thread_info.soft_muted);
924 pa_memblock_release(result->memblock);
925
926 result->index = 0;
927 }
928
929 inputs_drop(s, info, n, result);
930
931 pa_sink_unref(s);
932 }
933
934 /* Called from IO thread context */
935 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
936 pa_mix_info info[MAX_MIX_CHANNELS];
937 unsigned n;
938 size_t length, block_size_max;
939
940 pa_sink_assert_ref(s);
941 pa_sink_assert_io_context(s);
942 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
943 pa_assert(target);
944 pa_assert(target->memblock);
945 pa_assert(target->length > 0);
946 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
947
948 pa_sink_ref(s);
949
950 pa_assert(!s->thread_info.rewind_requested);
951 pa_assert(s->thread_info.rewind_nbytes == 0);
952
953 if (s->thread_info.state == PA_SINK_SUSPENDED) {
954 pa_silence_memchunk(target, &s->sample_spec);
955 return;
956 }
957
958 length = target->length;
959 block_size_max = pa_mempool_block_size_max(s->core->mempool);
960 if (length > block_size_max)
961 length = pa_frame_align(block_size_max, &s->sample_spec);
962
963 pa_assert(length > 0);
964
965 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
966
967 if (n == 0) {
968 if (target->length > length)
969 target->length = length;
970
971 pa_silence_memchunk(target, &s->sample_spec);
972 } else if (n == 1) {
973 pa_cvolume volume;
974
975 if (target->length > length)
976 target->length = length;
977
978 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
979
980 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
981 pa_silence_memchunk(target, &s->sample_spec);
982 else {
983 pa_memchunk vchunk;
984
985 vchunk = info[0].chunk;
986 pa_memblock_ref(vchunk.memblock);
987
988 if (vchunk.length > length)
989 vchunk.length = length;
990
991 if (!pa_cvolume_is_norm(&volume)) {
992 pa_memchunk_make_writable(&vchunk, 0);
993 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
994 }
995
996 pa_memchunk_memcpy(target, &vchunk);
997 pa_memblock_unref(vchunk.memblock);
998 }
999
1000 } else {
1001 void *ptr;
1002
1003 ptr = pa_memblock_acquire(target->memblock);
1004
1005 target->length = pa_mix(info, n,
1006 (uint8_t*) ptr + target->index, length,
1007 &s->sample_spec,
1008 &s->thread_info.soft_volume,
1009 s->thread_info.soft_muted);
1010
1011 pa_memblock_release(target->memblock);
1012 }
1013
1014 inputs_drop(s, info, n, target);
1015
1016 pa_sink_unref(s);
1017 }
1018
1019 /* Called from IO thread context */
1020 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1021 pa_memchunk chunk;
1022 size_t l, d;
1023
1024 pa_sink_assert_ref(s);
1025 pa_sink_assert_io_context(s);
1026 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1027 pa_assert(target);
1028 pa_assert(target->memblock);
1029 pa_assert(target->length > 0);
1030 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1031
1032 pa_sink_ref(s);
1033
1034 pa_assert(!s->thread_info.rewind_requested);
1035 pa_assert(s->thread_info.rewind_nbytes == 0);
1036
1037 l = target->length;
1038 d = 0;
1039 while (l > 0) {
1040 chunk = *target;
1041 chunk.index += d;
1042 chunk.length -= d;
1043
1044 pa_sink_render_into(s, &chunk);
1045
1046 d += chunk.length;
1047 l -= chunk.length;
1048 }
1049
1050 pa_sink_unref(s);
1051 }
1052
/* Called from IO thread context */
/* Render exactly 'length' bytes into *result (caller owns the memblock
 * reference). A first mixing pass produces up to length1st bytes; if the
 * result falls short of the request, the remainder is filled by repeated
 * pa_sink_render_into() calls on the tail. */
void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    size_t length1st = length;
    unsigned n;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(length > 0);
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    pa_assert(length > 0);

    /* fill_mix_info may shrink length1st to the shortest available chunk */
    n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        pa_silence_memchunk_get(&s->core->silence_cache,
                                s->core->mempool,
                                result,
                                &s->sample_spec,
                                length1st);
    } else if (n == 1) {
        pa_cvolume volume;

        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        /* NOTE(review): clamps against 'length' while the mix path uses
         * 'length1st'; presumably equivalent since length1st starts at
         * length and only shrinks — confirm. */
        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
            if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
                pa_memblock_unref(result->memblock);
                pa_silence_memchunk_get(&s->core->silence_cache,
                                        s->core->mempool,
                                        result,
                                        &s->sample_spec,
                                        result->length);
            } else {
                pa_memchunk_make_writable(result, length);
                pa_volume_memchunk(result, &s->sample_spec, &volume);
            }
        }
    } else {
        void *ptr;

        /* Allocate the full requested size so the tail can be filled below */
        result->index = 0;
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);

        result->length = pa_mix(info, n,
                                (uint8_t*) ptr + result->index, length1st,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(result->memblock);
    }

    inputs_drop(s, info, n, result);

    /* First pass came up short: render the remaining tail in-place */
    if (result->length < length) {
        pa_memchunk chunk;
        size_t l, d;
        pa_memchunk_make_writable(result, length);

        l = length - result->length;
        d = result->index + result->length;
        while (l > 0) {
            chunk = *result;
            chunk.index = d;
            chunk.length = l;

            pa_sink_render_into(s, &chunk);

            d += chunk.length;
            l -= chunk.length;
        }
        result->length = length;
    }

    pa_sink_unref(s);
}
1146
1147 /* Called from main thread */
1148 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1149 pa_usec_t usec = 0;
1150
1151 pa_sink_assert_ref(s);
1152 pa_assert_ctl_context();
1153 pa_assert(PA_SINK_IS_LINKED(s->state));
1154
1155 /* The returned value is supposed to be in the time domain of the sound card! */
1156
1157 if (s->state == PA_SINK_SUSPENDED)
1158 return 0;
1159
1160 if (!(s->flags & PA_SINK_LATENCY))
1161 return 0;
1162
1163 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1164
1165 return usec;
1166 }
1167
1168 /* Called from IO thread */
1169 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1170 pa_usec_t usec = 0;
1171 pa_msgobject *o;
1172
1173 pa_sink_assert_ref(s);
1174 pa_sink_assert_io_context(s);
1175 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1176
1177 /* The returned value is supposed to be in the time domain of the sound card! */
1178
1179 if (s->thread_info.state == PA_SINK_SUSPENDED)
1180 return 0;
1181
1182 if (!(s->flags & PA_SINK_LATENCY))
1183 return 0;
1184
1185 o = PA_MSGOBJECT(s);
1186
1187 /* We probably should make this a proper vtable callback instead of going through process_msg() */
1188
1189 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1190 return -1;
1191
1192 return usec;
1193 }
1194
/* Recompute a sink input's relative and soft volumes against a new sink
 * volume, channel by channel. new_volume must already be remapped into
 * the input's channel map (the caller does that). Only i->soft_volume
 * and i->relative_volume are written; the thread_info copy is NOT
 * updated here — that must be done by the caller. */
static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
    unsigned c;

    pa_sink_input_assert_ref(i);
    pa_assert(new_volume->channels == i->sample_spec.channels);

    /*
     * This basically calculates:
     *
     * i->relative_volume := i->virtual_volume / new_volume
     * i->soft_volume := i->relative_volume * i->volume_factor
     */

    /* The new sink volume passed in here must already be remapped to
     * the sink input's channel map! */

    i->soft_volume.channels = i->sample_spec.channels;

    for (c = 0; c < i->sample_spec.channels; c++)

        if (new_volume->values[c] <= PA_VOLUME_MUTED)
            /* Sink channel is muted: the division below would be
             * undefined, so mute the soft volume instead.
             * We leave i->relative_volume untouched */
            i->soft_volume.values[c] = PA_VOLUME_MUTED;
        else {
            /* Ratio is computed in the linear domain, not in dB. */
            i->relative_volume[c] =
                pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
                pa_sw_volume_to_linear(new_volume->values[c]);

            i->soft_volume.values[c] = pa_sw_volume_from_linear(
                    i->relative_volume[c] *
                    pa_sw_volume_to_linear(i->volume_factor.values[c]));
        }

    /* Hooks have the ability to play games with i->soft_volume */
    pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);

    /* We don't copy the soft_volume to the thread_info data
     * here. That must be done by the caller */
}
1234
1235 /* Called from main thread */
1236 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1237 pa_sink_input *i;
1238 uint32_t idx;
1239
1240 pa_sink_assert_ref(s);
1241 pa_assert_ctl_context();
1242 pa_assert(new_volume);
1243 pa_assert(PA_SINK_IS_LINKED(s->state));
1244 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1245
1246 /* This is called whenever a sink input volume changes or a sink
1247 * input is added/removed and we might need to fix up the sink
1248 * volume accordingly. Please note that we don't actually update
1249 * the sinks volume here, we only return how it needs to be
1250 * updated. The caller should then call pa_sink_set_volume().*/
1251
1252 if (pa_idxset_isempty(s->inputs)) {
1253 /* In the special case that we have no sink input we leave the
1254 * volume unmodified. */
1255 *new_volume = s->reference_volume;
1256 return;
1257 }
1258
1259 pa_cvolume_mute(new_volume, s->channel_map.channels);
1260
1261 /* First let's determine the new maximum volume of all inputs
1262 * connected to this sink */
1263 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1264 unsigned c;
1265 pa_cvolume remapped_volume;
1266
1267 remapped_volume = i->virtual_volume;
1268 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1269
1270 for (c = 0; c < new_volume->channels; c++)
1271 if (remapped_volume.values[c] > new_volume->values[c])
1272 new_volume->values[c] = remapped_volume.values[c];
1273 }
1274
1275 /* Then, let's update the soft volumes of all inputs connected
1276 * to this sink */
1277 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1278 pa_cvolume remapped_new_volume;
1279
1280 remapped_new_volume = *new_volume;
1281 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1282 compute_new_soft_volume(i, &remapped_new_volume);
1283
1284 /* We don't copy soft_volume to the thread_info data here
1285 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1286 * want the update to be atomically with the sink volume
1287 * update, hence we do it within the pa_sink_set_volume() call
1288 * below */
1289 }
1290 }
1291
1292 /* Called from main thread */
1293 void pa_sink_propagate_flat_volume(pa_sink *s) {
1294 pa_sink_input *i;
1295 uint32_t idx;
1296
1297 pa_sink_assert_ref(s);
1298 pa_assert_ctl_context();
1299 pa_assert(PA_SINK_IS_LINKED(s->state));
1300 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1301
1302 /* This is called whenever the sink volume changes that is not
1303 * caused by a sink input volume change. We need to fix up the
1304 * sink input volumes accordingly */
1305
1306 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1307 pa_cvolume sink_volume, new_virtual_volume;
1308 unsigned c;
1309
1310 /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */
1311
1312 sink_volume = s->virtual_volume;
1313 pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);
1314
1315 for (c = 0; c < i->sample_spec.channels; c++)
1316 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1317 i->relative_volume[c] *
1318 pa_sw_volume_to_linear(sink_volume.values[c]));
1319
1320 new_virtual_volume.channels = i->sample_spec.channels;
1321
1322 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1323 i->virtual_volume = new_virtual_volume;
1324
1325 /* Hmm, the soft volume might no longer actually match
1326 * what has been chosen as new virtual volume here,
1327 * especially when the old volume was
1328 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1329 * volumes here. */
1330 compute_new_soft_volume(i, &sink_volume);
1331
1332 /* The virtual volume changed, let's tell people so */
1333 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1334 }
1335 }
1336
1337 /* If the soft_volume of any of the sink inputs got changed, let's
1338 * make sure the thread copies are synced up. */
1339 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
1340 }
1341
/* Called from main thread */
/* Apply a new virtual volume to the sink.
 *
 * volume           - the new virtual volume (must be valid and
 *                    compatible with the sink's sample spec)
 * propagate        - in flat volume mode, push the change down to the
 *                    sink inputs
 * sendmsg          - notify the IO thread so the soft volume takes
 *                    effect there
 * become_reference - also store the volume as the reference volume
 * save             - mark the volume worth saving to disk */
void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference, pa_bool_t save) {
    pa_bool_t virtual_volume_changed;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(volume);
    pa_assert(pa_cvolume_valid(volume));
    pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));

    virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
    s->virtual_volume = *volume;
    /* A changed volume resets the saved flag unless 'save' re-requests it */
    s->save_volume = (!virtual_volume_changed && s->save_volume) || save;

    if (become_reference)
        s->reference_volume = s->virtual_volume;

    /* Propagate this volume change back to the inputs */
    if (virtual_volume_changed)
        if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
            pa_sink_propagate_flat_volume(s);

    if (s->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to s->soft_volume */

        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
        s->set_volume(s);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the virtual volume */
        s->soft_volume = s->virtual_volume;

    /* This tells the sink that soft and/or virtual volume changed */
    if (sendmsg)
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);

    if (virtual_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1385
1386 /* Called from main thread. Only to be called by sink implementor */
1387 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1388 pa_sink_assert_ref(s);
1389 pa_assert_ctl_context();
1390 pa_assert(volume);
1391
1392 s->soft_volume = *volume;
1393
1394 if (PA_SINK_IS_LINKED(s->state))
1395 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1396 else
1397 s->thread_info.soft_volume = *volume;
1398 }
1399
1400 /* Called from main thread */
1401 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
1402 pa_sink_assert_ref(s);
1403 pa_assert_ctl_context();
1404 pa_assert(PA_SINK_IS_LINKED(s->state));
1405
1406 if (s->refresh_volume || force_refresh) {
1407 struct pa_cvolume old_virtual_volume = s->virtual_volume;
1408
1409 if (s->get_volume)
1410 s->get_volume(s);
1411
1412 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1413
1414 if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {
1415
1416 s->reference_volume = s->virtual_volume;
1417
1418 /* Something got changed in the hardware. It probably
1419 * makes sense to save changed hw settings given that hw
1420 * volume changes not triggered by PA are almost certainly
1421 * done by the user. */
1422 s->save_volume = TRUE;
1423
1424 if (s->flags & PA_SINK_FLAT_VOLUME)
1425 pa_sink_propagate_flat_volume(s);
1426
1427 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1428 }
1429 }
1430
1431 return reference ? &s->reference_volume : &s->virtual_volume;
1432 }
1433
1434 /* Called from main thread */
1435 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume) {
1436 pa_sink_assert_ref(s);
1437 pa_assert_ctl_context();
1438 pa_assert(PA_SINK_IS_LINKED(s->state));
1439
1440 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1441 if (pa_cvolume_equal(&s->virtual_volume, new_volume))
1442 return;
1443
1444 s->reference_volume = s->virtual_volume = *new_volume;
1445 s->save_volume = TRUE;
1446
1447 if (s->flags & PA_SINK_FLAT_VOLUME)
1448 pa_sink_propagate_flat_volume(s);
1449
1450 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1451 }
1452
1453 /* Called from main thread */
1454 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1455 pa_bool_t old_muted;
1456
1457 pa_sink_assert_ref(s);
1458 pa_assert_ctl_context();
1459 pa_assert(PA_SINK_IS_LINKED(s->state));
1460
1461 old_muted = s->muted;
1462 s->muted = mute;
1463 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1464
1465 if (s->set_mute)
1466 s->set_mute(s);
1467
1468 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1469
1470 if (old_muted != s->muted)
1471 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1472 }
1473
1474 /* Called from main thread */
1475 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1476
1477 pa_sink_assert_ref(s);
1478 pa_assert_ctl_context();
1479 pa_assert(PA_SINK_IS_LINKED(s->state));
1480
1481 if (s->refresh_muted || force_refresh) {
1482 pa_bool_t old_muted = s->muted;
1483
1484 if (s->get_mute)
1485 s->get_mute(s);
1486
1487 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1488
1489 if (old_muted != s->muted) {
1490 s->save_muted = TRUE;
1491
1492 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1493
1494 /* Make sure the soft mute status stays in sync */
1495 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1496 }
1497 }
1498
1499
1500 return s->muted;
1501 }
1502
/* Called from main thread */
void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* The sink implementor may call this if the mute status changed to
     * make sure everyone is notified (the old comment said "volume" —
     * a copy/paste leftover from pa_sink_volume_changed()) */

    if (s->muted == new_muted)
        return;

    s->muted = new_muted;
    /* A mute change coming from the hardware is almost certainly
     * user-initiated, so remember it */
    s->save_muted = TRUE;

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1519
1520 /* Called from main thread */
1521 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1522 pa_sink_assert_ref(s);
1523 pa_assert_ctl_context();
1524
1525 if (p)
1526 pa_proplist_update(s->proplist, mode, p);
1527
1528 if (PA_SINK_IS_LINKED(s->state)) {
1529 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1530 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1531 }
1532
1533 return TRUE;
1534 }
1535
1536 /* Called from main thread */
1537 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1538 void pa_sink_set_description(pa_sink *s, const char *description) {
1539 const char *old;
1540 pa_sink_assert_ref(s);
1541 pa_assert_ctl_context();
1542
1543 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1544 return;
1545
1546 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1547
1548 if (old && description && pa_streq(old, description))
1549 return;
1550
1551 if (description)
1552 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1553 else
1554 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1555
1556 if (s->monitor_source) {
1557 char *n;
1558
1559 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1560 pa_source_set_description(s->monitor_source, n);
1561 pa_xfree(n);
1562 }
1563
1564 if (PA_SINK_IS_LINKED(s->state)) {
1565 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1566 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1567 }
1568 }
1569
/* Called from main thread */
/* Count everything linked to this sink: all sink inputs plus all
 * streams connected to the sink's monitor source. */
unsigned pa_sink_linked_by(pa_sink *s) {
    unsigned ret;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    ret = pa_idxset_size(s->inputs);

    /* We add in the number of streams connected to us here. Please
     * note the asymmetry to pa_sink_used_by()! */

    if (s->monitor_source)
        ret += pa_source_linked_by(s->monitor_source);

    return ret;
}
1588
1589 /* Called from main thread */
1590 unsigned pa_sink_used_by(pa_sink *s) {
1591 unsigned ret;
1592
1593 pa_sink_assert_ref(s);
1594 pa_assert_ctl_context();
1595 pa_assert(PA_SINK_IS_LINKED(s->state));
1596
1597 ret = pa_idxset_size(s->inputs);
1598 pa_assert(ret >= s->n_corked);
1599
1600 /* Streams connected to our monitor source do not matter for
1601 * pa_sink_used_by()!.*/
1602
1603 return ret - s->n_corked;
1604 }
1605
1606 /* Called from main thread */
1607 unsigned pa_sink_check_suspend(pa_sink *s) {
1608 unsigned ret;
1609 pa_sink_input *i;
1610 uint32_t idx;
1611
1612 pa_sink_assert_ref(s);
1613 pa_assert_ctl_context();
1614
1615 if (!PA_SINK_IS_LINKED(s->state))
1616 return 0;
1617
1618 ret = 0;
1619
1620 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1621 pa_sink_input_state_t st;
1622
1623 st = pa_sink_input_get_state(i);
1624 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1625
1626 if (st == PA_SINK_INPUT_CORKED)
1627 continue;
1628
1629 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1630 continue;
1631
1632 ret ++;
1633 }
1634
1635 if (s->monitor_source)
1636 ret += pa_source_check_suspend(s->monitor_source);
1637
1638 return ret;
1639 }
1640
1641 /* Called from the IO thread */
1642 static void sync_input_volumes_within_thread(pa_sink *s) {
1643 pa_sink_input *i;
1644 void *state = NULL;
1645
1646 pa_sink_assert_ref(s);
1647 pa_sink_assert_io_context(s);
1648
1649 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1650 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1651 continue;
1652
1653 i->thread_info.soft_volume = i->soft_volume;
1654 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1655 }
1656 }
1657
/* Called from IO thread, except when it is not */
/* Central message dispatcher for the sink. Handles stream lifecycle
 * (add/remove/move), volume/mute sync between main and IO thread,
 * state changes and the various latency/rewind queries. Returns 0 on
 * success, -1 for unhandled codes. */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            /* The hashmap takes its own reference on the input */
            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be
             * fixed up and then configured on the sink */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming. */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unchain from the thread-side sync list */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            /* Drop the reference the hashmap held */
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* Get the latency of the sink */
                if (!(s->flags & PA_SINK_LATENCY) ||
                    PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
                    usec = 0;

                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                if (total_nbytes > 0) {
                    /* Have the input re-render everything that is still
                     * in flight so it can be replayed on the new sink */
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* Get the latency of the sink */
                if (!(s->flags & PA_SINK_LATENCY) ||
                    PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
                    usec = 0;

                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                /* Skip what the new sink already has queued so playback
                 * continues seamlessly */
                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            /* In flat volume mode the input volumes need syncing too */
            if (!(s->flags & PA_SINK_FLAT_VOLUME))
                return 0;

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:
            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:
            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* Track transitions into or out of suspend so the inputs
             * can be informed below */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                /* A pending rewind is pointless while suspended */
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* (pa_usec_t) -1 means "no request"; report the maximum then */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            /* GET_LATENCY is implemented by the sink implementor's own
             * process_msg(); unhandled here on purpose */
            ;
    }

    return -1;
}
1977
1978 /* Called from main thread */
1979 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
1980 pa_sink *sink;
1981 uint32_t idx;
1982 int ret = 0;
1983
1984 pa_core_assert_ref(c);
1985 pa_assert_ctl_context();
1986 pa_assert(cause != 0);
1987
1988 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
1989 int r;
1990
1991 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
1992 ret = r;
1993 }
1994
1995 return ret;
1996 }
1997
1998 /* Called from main thread */
1999 void pa_sink_detach(pa_sink *s) {
2000 pa_sink_assert_ref(s);
2001 pa_assert_ctl_context();
2002 pa_assert(PA_SINK_IS_LINKED(s->state));
2003
2004 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2005 }
2006
2007 /* Called from main thread */
2008 void pa_sink_attach(pa_sink *s) {
2009 pa_sink_assert_ref(s);
2010 pa_assert_ctl_context();
2011 pa_assert(PA_SINK_IS_LINKED(s->state));
2012
2013 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2014 }
2015
2016 /* Called from IO thread */
2017 void pa_sink_detach_within_thread(pa_sink *s) {
2018 pa_sink_input *i;
2019 void *state = NULL;
2020
2021 pa_sink_assert_ref(s);
2022 pa_sink_assert_io_context(s);
2023 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2024
2025 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2026 if (i->detach)
2027 i->detach(i);
2028
2029 if (s->monitor_source)
2030 pa_source_detach_within_thread(s->monitor_source);
2031 }
2032
2033 /* Called from IO thread */
2034 void pa_sink_attach_within_thread(pa_sink *s) {
2035 pa_sink_input *i;
2036 void *state = NULL;
2037
2038 pa_sink_assert_ref(s);
2039 pa_sink_assert_io_context(s);
2040 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2041
2042 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2043 if (i->attach)
2044 i->attach(i);
2045
2046 if (s->monitor_source)
2047 pa_source_attach_within_thread(s->monitor_source);
2048 }
2049
2050 /* Called from IO thread */
2051 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2052 pa_sink_assert_ref(s);
2053 pa_sink_assert_io_context(s);
2054 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2055
2056 if (s->thread_info.state == PA_SINK_SUSPENDED)
2057 return;
2058
2059 if (nbytes == (size_t) -1)
2060 nbytes = s->thread_info.max_rewind;
2061
2062 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2063
2064 if (s->thread_info.rewind_requested &&
2065 nbytes <= s->thread_info.rewind_nbytes)
2066 return;
2067
2068 s->thread_info.rewind_nbytes = nbytes;
2069 s->thread_info.rewind_requested = TRUE;
2070
2071 if (s->request_rewind)
2072 s->request_rewind(s);
2073 }
2074
2075 /* Called from IO thread */
2076 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2077 pa_usec_t result = (pa_usec_t) -1;
2078 pa_sink_input *i;
2079 void *state = NULL;
2080 pa_usec_t monitor_latency;
2081
2082 pa_sink_assert_ref(s);
2083 pa_sink_assert_io_context(s);
2084
2085 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2086 return PA_CLAMP(s->fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2087
2088 if (s->thread_info.requested_latency_valid)
2089 return s->thread_info.requested_latency;
2090
2091 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2092
2093 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2094 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2095 result = i->thread_info.requested_sink_latency;
2096
2097 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2098
2099 if (monitor_latency != (pa_usec_t) -1 &&
2100 (result == (pa_usec_t) -1 || result > monitor_latency))
2101 result = monitor_latency;
2102
2103 if (result != (pa_usec_t) -1)
2104 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2105
2106 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2107 /* Only cache if properly initialized */
2108 s->thread_info.requested_latency = result;
2109 s->thread_info.requested_latency_valid = TRUE;
2110 }
2111
2112 return result;
2113 }
2114
2115 /* Called from main thread */
2116 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2117 pa_usec_t usec = 0;
2118
2119 pa_sink_assert_ref(s);
2120 pa_assert_ctl_context();
2121 pa_assert(PA_SINK_IS_LINKED(s->state));
2122
2123 if (s->state == PA_SINK_SUSPENDED)
2124 return 0;
2125
2126 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2127 return usec;
2128 }
2129
2130 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2131 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2132 pa_sink_input *i;
2133 void *state = NULL;
2134
2135 pa_sink_assert_ref(s);
2136 pa_sink_assert_io_context(s);
2137
2138 if (max_rewind == s->thread_info.max_rewind)
2139 return;
2140
2141 s->thread_info.max_rewind = max_rewind;
2142
2143 if (PA_SINK_IS_LINKED(s->thread_info.state))
2144 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2145 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2146
2147 if (s->monitor_source)
2148 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2149 }
2150
2151 /* Called from main thread */
2152 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2153 pa_sink_assert_ref(s);
2154 pa_assert_ctl_context();
2155
2156 if (PA_SINK_IS_LINKED(s->state))
2157 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2158 else
2159 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2160 }
2161
2162 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2163 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2164 void *state = NULL;
2165
2166 pa_sink_assert_ref(s);
2167 pa_sink_assert_io_context(s);
2168
2169 if (max_request == s->thread_info.max_request)
2170 return;
2171
2172 s->thread_info.max_request = max_request;
2173
2174 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2175 pa_sink_input *i;
2176
2177 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2178 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2179 }
2180 }
2181
2182 /* Called from main thread */
2183 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2184 pa_sink_assert_ref(s);
2185 pa_assert_ctl_context();
2186
2187 if (PA_SINK_IS_LINKED(s->state))
2188 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2189 else
2190 pa_sink_set_max_request_within_thread(s, max_request);
2191 }
2192
2193 /* Called from IO thread */
2194 void pa_sink_invalidate_requested_latency(pa_sink *s) {
2195 pa_sink_input *i;
2196 void *state = NULL;
2197
2198 pa_sink_assert_ref(s);
2199 pa_sink_assert_io_context(s);
2200
2201 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2202 return;
2203
2204 s->thread_info.requested_latency_valid = FALSE;
2205
2206 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2207
2208 if (s->update_requested_latency)
2209 s->update_requested_latency(s);
2210
2211 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2212 if (i->update_sink_requested_latency)
2213 i->update_sink_requested_latency(i);
2214 }
2215 }
2216
2217 /* Called from main thread */
2218 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2219 pa_sink_assert_ref(s);
2220 pa_assert_ctl_context();
2221
2222 /* min_latency == 0: no limit
2223 * min_latency anything else: specified limit
2224 *
2225 * Similar for max_latency */
2226
2227 if (min_latency < ABSOLUTE_MIN_LATENCY)
2228 min_latency = ABSOLUTE_MIN_LATENCY;
2229
2230 if (max_latency <= 0 ||
2231 max_latency > ABSOLUTE_MAX_LATENCY)
2232 max_latency = ABSOLUTE_MAX_LATENCY;
2233
2234 pa_assert(min_latency <= max_latency);
2235
2236 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2237 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2238 max_latency == ABSOLUTE_MAX_LATENCY) ||
2239 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2240
2241 if (PA_SINK_IS_LINKED(s->state)) {
2242 pa_usec_t r[2];
2243
2244 r[0] = min_latency;
2245 r[1] = max_latency;
2246
2247 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2248 } else
2249 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2250 }
2251
2252 /* Called from main thread */
2253 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2254 pa_sink_assert_ref(s);
2255 pa_assert_ctl_context();
2256 pa_assert(min_latency);
2257 pa_assert(max_latency);
2258
2259 if (PA_SINK_IS_LINKED(s->state)) {
2260 pa_usec_t r[2] = { 0, 0 };
2261
2262 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2263
2264 *min_latency = r[0];
2265 *max_latency = r[1];
2266 } else {
2267 *min_latency = s->thread_info.min_latency;
2268 *max_latency = s->thread_info.max_latency;
2269 }
2270 }
2271
/* Called from IO thread */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Callers must pass values already clamped to the absolute bounds */
    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    /* If already linked, notify every input that implements the
     * latency-range hook about the change */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    /* The cached requested latency was clamped against the old range,
     * so it must be recomputed */
    pa_sink_invalidate_requested_latency(s);

    /* Keep the monitor source's latency range in sync with ours */
    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
2303
2304 /* Called from main thread, before the sink is put */
2305 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2306 pa_sink_assert_ref(s);
2307 pa_assert_ctl_context();
2308 pa_assert(pa_sink_get_state(s) == PA_SINK_INIT);
2309
2310 if (latency < ABSOLUTE_MIN_LATENCY)
2311 latency = ABSOLUTE_MIN_LATENCY;
2312
2313 if (latency > ABSOLUTE_MAX_LATENCY)
2314 latency = ABSOLUTE_MAX_LATENCY;
2315
2316 s->fixed_latency = latency;
2317 pa_source_set_fixed_latency(s->monitor_source, latency);
2318 }
2319
2320 /* Called from main context */
2321 size_t pa_sink_get_max_rewind(pa_sink *s) {
2322 size_t r;
2323 pa_sink_assert_ref(s);
2324 pa_assert_ctl_context();
2325
2326 if (!PA_SINK_IS_LINKED(s->state))
2327 return s->thread_info.max_rewind;
2328
2329 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2330
2331 return r;
2332 }
2333
2334 /* Called from main context */
2335 size_t pa_sink_get_max_request(pa_sink *s) {
2336 size_t r;
2337 pa_sink_assert_ref(s);
2338 pa_assert_ctl_context();
2339
2340 if (!PA_SINK_IS_LINKED(s->state))
2341 return s->thread_info.max_request;
2342
2343 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2344
2345 return r;
2346 }
2347
2348 /* Called from main context */
2349 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2350 pa_device_port *port;
2351
2352 pa_sink_assert_ref(s);
2353 pa_assert_ctl_context();
2354
2355 if (!s->set_port) {
2356 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2357 return -PA_ERR_NOTIMPLEMENTED;
2358 }
2359
2360 if (!s->ports)
2361 return -PA_ERR_NOENTITY;
2362
2363 if (!(port = pa_hashmap_get(s->ports, name)))
2364 return -PA_ERR_NOENTITY;
2365
2366 if (s->active_port == port) {
2367 s->save_port = s->save_port || save;
2368 return 0;
2369 }
2370
2371 if ((s->set_port(s, port)) < 0)
2372 return -PA_ERR_NOENTITY;
2373
2374 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2375
2376 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2377
2378 s->active_port = port;
2379 s->save_port = save;
2380
2381 return 0;
2382 }
2383
2384 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2385 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2386
2387 pa_assert(p);
2388
2389 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2390 return TRUE;
2391
2392 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2393
2394 if (pa_streq(ff, "microphone"))
2395 t = "audio-input-microphone";
2396 else if (pa_streq(ff, "webcam"))
2397 t = "camera-web";
2398 else if (pa_streq(ff, "computer"))
2399 t = "computer";
2400 else if (pa_streq(ff, "handset"))
2401 t = "phone";
2402 else if (pa_streq(ff, "portable"))
2403 t = "multimedia-player";
2404 else if (pa_streq(ff, "tv"))
2405 t = "video-display";
2406
2407 /*
2408 * The following icons are not part of the icon naming spec,
2409 * because Rodney Dawes sucks as the maintainer of that spec.
2410 *
2411 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2412 */
2413 else if (pa_streq(ff, "headset"))
2414 t = "audio-headset";
2415 else if (pa_streq(ff, "headphone"))
2416 t = "audio-headphones";
2417 else if (pa_streq(ff, "speaker"))
2418 t = "audio-speakers";
2419 else if (pa_streq(ff, "hands-free"))
2420 t = "audio-handsfree";
2421 }
2422
2423 if (!t)
2424 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2425 if (pa_streq(c, "modem"))
2426 t = "modem";
2427
2428 if (!t) {
2429 if (is_sink)
2430 t = "audio-card";
2431 else
2432 t = "audio-input-microphone";
2433 }
2434
2435 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2436 if (strstr(profile, "analog"))
2437 s = "-analog";
2438 else if (strstr(profile, "iec958"))
2439 s = "-iec958";
2440 else if (strstr(profile, "hdmi"))
2441 s = "-hdmi";
2442 }
2443
2444 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2445
2446 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2447
2448 return TRUE;
2449 }
2450
2451 pa_bool_t pa_device_init_description(pa_proplist *p) {
2452 const char *s, *d = NULL, *k;
2453 pa_assert(p);
2454
2455 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2456 return TRUE;
2457
2458 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2459 if (pa_streq(s, "internal"))
2460 d = _("Internal Audio");
2461
2462 if (!d)
2463 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2464 if (pa_streq(s, "modem"))
2465 d = _("Modem");
2466
2467 if (!d)
2468 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2469
2470 if (!d)
2471 return FALSE;
2472
2473 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2474
2475 if (d && k)
2476 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2477 else if (d)
2478 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2479
2480 return TRUE;
2481 }
2482
2483 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2484 const char *s;
2485 pa_assert(p);
2486
2487 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2488 return TRUE;
2489
2490 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2491 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2492 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2493 return TRUE;
2494 }
2495
2496 return FALSE;
2497 }