]> code.delx.au - pulseaudio/blob - src/pulsecore/sink.c
core: handle suspended state in pa_sink_render_full() similar to the other render...
[pulseaudio] / src / pulsecore / sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
54
55 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 pa_zero(*data);
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
149
150 /* Called from main context */
151 static void reset_callbacks(pa_sink *s) {
152 pa_assert(s);
153
154 s->set_state = NULL;
155 s->get_volume = NULL;
156 s->set_volume = NULL;
157 s->get_mute = NULL;
158 s->set_mute = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
161 s->set_port = NULL;
162 }
163
/* Called from main context */
/* Allocate and initialize a new sink from 'data'. Registers the name,
 * fires the NEW and FIXATE hooks (either may veto creation), fills in
 * defaults for any fields the caller left unset, and creates the
 * matching ".monitor" source. Returns NULL on failure. NOTE(review):
 * as the FIXME below says, failures after the first validation block
 * leak 's' (only name registration is undone) — confirm against a
 * newer upstream revision before relying on this path. */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* Claim the (possibly mangled) name in the name registry first;
     * a clash is the earliest possible failure. */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Derive a default channel map from the channel count if the
     * caller didn't supply one. */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    if (!data->volume_is_set)
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit properties from the owning card, then fill in standard
     * description/icon/role properties. */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to adjust 'data' before it is frozen
     * into the sink object. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* No explicit port requested (or it didn't exist): pick the
     * highest-priority port as the active one. */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* Mirror of the main-thread state for use by the IO thread. */
    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    /* FIXME: This should probably be moved to pa_sink_put() */
    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Every sink gets a companion monitor source that taps the mixed
     * output; it shares the sink's spec, map and latency flags. */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
360
/* Called from main context */
/* Transition the sink into 'state'. First asks the implementor via
 * set_state(), then tells the IO thread via a SET_STATE message; if the
 * IO thread refuses, the implementor callback is rolled back to the
 * original state so both sides stay consistent. On success, fires
 * state-change hooks/subscriptions (unless we are entering UNLINKED)
 * and, on suspend/resume transitions, notifies every sink input and
 * syncs the monitor source. Returns 0 on success, negative on error. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* Are we crossing the boundary between an opened state and
     * SUSPENDED, in either direction? */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* IO thread rejected the change: undo the implementor
             * callback. */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
418
/* Called from main context */
/* Complete sink initialization and make it live: validates the fields
 * the implementor had to set between _new() and _put(), derives the
 * volume-related flags, moves the sink to IDLE, publishes the monitor
 * source, and announces the new sink to subscribers and hooks. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow volume related flags to be set
     * between _new() and _put(). */

    /* Pure software volume is always expressible in decibels. */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
        s->flags |= PA_SINK_DECIBEL_VOLUME;

    if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
        s->flags |= PA_SINK_FLAT_VOLUME;

    /* We assume that if the sink implementor changed the default
     * volume he did so in real_volume, because that is the usual
     * place where he is supposed to place his changes.  */
    s->reference_volume = s->real_volume;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;

    /* Sanity-check the flag/field combinations the implementor set up,
     * and that the monitor source mirrors the sink's latency setup. */
    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
465
/* Called from main context */
/* Disconnect the sink from the core: unregister its name, remove it
 * from the core/card sets, kill all attached inputs, move it to the
 * UNLINKED state, clear the implementor callbacks and unlink the
 * monitor source. Safe to call multiple times (idempotent), and also
 * undoes registrations done in pa_sink_new(), not just pa_sink_put(). */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Killing an input removes it from s->inputs; the 'j' check guards
     * against an input that refuses to go away, which would otherwise
     * loop forever. */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
515
/* Called from main context */
/* Destructor invoked when the last reference to the sink is dropped.
 * Unlinks the sink if that has not happened yet, then releases the
 * monitor source, both input containers, the silence block, the owned
 * strings, the property list and the port map. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);
    pa_sink_input *i;

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    pa_idxset_free(s->inputs, NULL, NULL);

    /* The thread_info map holds its own references to the inputs;
     * drop them one by one before freeing the map itself. */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
562
563 /* Called from main context, and not while the IO thread is active, please */
564 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
565 pa_sink_assert_ref(s);
566 pa_assert_ctl_context();
567
568 s->asyncmsgq = q;
569
570 if (s->monitor_source)
571 pa_source_set_asyncmsgq(s->monitor_source, q);
572 }
573
574 /* Called from main context, and not while the IO thread is active, please */
575 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
576 pa_sink_assert_ref(s);
577 pa_assert_ctl_context();
578
579 if (mask == 0)
580 return;
581
582 /* For now, allow only a minimal set of flags to be changed. */
583 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
584
585 s->flags = (s->flags & ~mask) | (value & mask);
586
587 pa_source_update_flags(s->monitor_source,
588 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
589 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
590 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
591 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
592 }
593
594 /* Called from IO context, or before _put() from main context */
595 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
596 pa_sink_assert_ref(s);
597 pa_sink_assert_io_context(s);
598
599 s->thread_info.rtpoll = p;
600
601 if (s->monitor_source)
602 pa_source_set_rtpoll(s->monitor_source, p);
603 }
604
605 /* Called from main context */
606 int pa_sink_update_status(pa_sink*s) {
607 pa_sink_assert_ref(s);
608 pa_assert_ctl_context();
609 pa_assert(PA_SINK_IS_LINKED(s->state));
610
611 if (s->state == PA_SINK_SUSPENDED)
612 return 0;
613
614 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
615 }
616
617 /* Called from main context */
618 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
619 pa_sink_assert_ref(s);
620 pa_assert_ctl_context();
621 pa_assert(PA_SINK_IS_LINKED(s->state));
622 pa_assert(cause != 0);
623
624 if (suspend) {
625 s->suspend_cause |= cause;
626 s->monitor_source->suspend_cause |= cause;
627 } else {
628 s->suspend_cause &= ~cause;
629 s->monitor_source->suspend_cause &= ~cause;
630 }
631
632 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
633 return 0;
634
635 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
636
637 if (s->suspend_cause)
638 return sink_set_state(s, PA_SINK_SUSPENDED);
639 else
640 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
641 }
642
643 /* Called from main context */
644 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
645 pa_sink_input *i, *n;
646 uint32_t idx;
647
648 pa_sink_assert_ref(s);
649 pa_assert_ctl_context();
650 pa_assert(PA_SINK_IS_LINKED(s->state));
651
652 if (!q)
653 q = pa_queue_new();
654
655 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
656 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
657
658 pa_sink_input_ref(i);
659
660 if (pa_sink_input_start_move(i) >= 0)
661 pa_queue_push(q, i);
662 else
663 pa_sink_input_unref(i);
664 }
665
666 return q;
667 }
668
669 /* Called from main context */
670 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
671 pa_sink_input *i;
672
673 pa_sink_assert_ref(s);
674 pa_assert_ctl_context();
675 pa_assert(PA_SINK_IS_LINKED(s->state));
676 pa_assert(q);
677
678 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
679 if (pa_sink_input_finish_move(i, s, save) < 0)
680 pa_sink_input_fail_move(i);
681
682 pa_sink_input_unref(i);
683 }
684
685 pa_queue_free(q, NULL, NULL);
686 }
687
688 /* Called from main context */
689 void pa_sink_move_all_fail(pa_queue *q) {
690 pa_sink_input *i;
691
692 pa_assert_ctl_context();
693 pa_assert(q);
694
695 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
696 pa_sink_input_fail_move(i);
697 pa_sink_input_unref(i);
698 }
699
700 pa_queue_free(q, NULL, NULL);
701 }
702
/* Called from IO thread context */
/* Execute a rewind of 'nbytes' bytes: clear the pending-rewind state,
 * propagate the rewind to every attached input and then to the monitor
 * source. Does nothing while suspended, or when no rewind was requested
 * and nbytes is zero. */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* If nobody requested this and this is actually no real rewind
     * then we can short cut this. Please note that this means that
     * not all rewind requests triggered upstream will always be
     * translated in actual requests! */
    /* (nbytes is size_t, so "<= 0" is effectively "== 0") */
    if (!s->thread_info.rewind_requested && nbytes <= 0)
        return;

    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return;

    if (nbytes > 0)
        pa_log_debug("Processing rewind...");

    /* Even a zero-byte rewind is forwarded here, so inputs can clear
     * their own pending-rewind state. */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_process_rewind(i, nbytes);
    }

    if (nbytes > 0)
        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
            pa_source_process_rewind(s->monitor_source, nbytes);
}
737
/* Called from IO thread context */
/* Peek the next chunk of up to 'maxinfo' sink inputs into 'info'.
 * On entry *length is the wanted byte count (0 = no preference); on
 * exit it is clamped to the shortest chunk seen, so all collected
 * chunks can be mixed over one common length. Each filled entry holds
 * a reference to its memblock and to the input (stashed in userdata);
 * inputs_drop() is responsible for releasing both. Silent chunks are
 * unreffed immediately and not added to 'info', though they still
 * participate in the length clamp. Returns the number of entries
 * filled in. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
777
/* Called from IO thread context */
/* Counterpart to fill_mix_info(): after 'result' has been produced,
 * advance every attached input by result->length bytes, feed per-input
 * audio to any direct source outputs, release the references held by
 * the 'info' entries, and finally post 'result' to the monitor source. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state = NULL;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array */
        /* 'p' persists across iterations: if input order is unchanged,
         * each search hits on the first probe. */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                /* Direct outputs get this input's own (volume-scaled)
                 * audio rather than the mixed result; inputs without a
                 * mix entry (e.g. silent ones) get the silence block. */
                if (m && m->chunk.memblock) {
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release the references fill_mix_info() took for this entry. */
        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
874
/* Called from IO thread context */
/* Render up to 'length' bytes of mixed audio into *result (the chunk's
 * memblock reference is owned by the caller afterwards). 'length' == 0
 * means "pick a default size". May return less than requested. A
 * suspended sink returns silence immediately. */
void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        result->memblock = pa_memblock_ref(s->silence.memblock);
        result->index = s->silence.index;
        result->length = PA_MIN(s->silence.length, length);

        pa_sink_unref(s);
        return;
    }

    /* (length is size_t, so this "<= 0" is a zero check) */
    if (length <= 0)
        length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);

    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {

        /* No active inputs: hand out (a ref to) the cached silence. */
        *result = s->silence;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

    } else if (n == 1) {
        pa_cvolume volume;

        /* Single input: reuse its chunk directly, applying volume/mute
         * only when needed. */
        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
            if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
                pa_memblock_unref(result->memblock);
                pa_silence_memchunk_get(&s->core->silence_cache,
                                        s->core->mempool,
                                        result,
                                        &s->sample_spec,
                                        result->length);
            } else {
                pa_memchunk_make_writable(result, 0);
                pa_volume_memchunk(result, &s->sample_spec, &volume);
            }
        }
    } else {
        /* Multiple inputs: mix them into a freshly allocated block. */
        void *ptr;
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);
        result->length = pa_mix(info, n,
                                ptr, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);
        pa_memblock_release(result->memblock);

        result->index = 0;
    }

    inputs_drop(s, info, n, result);

    pa_sink_unref(s);
}
963
/* Called from IO thread context */
/* Render mixed audio directly into the caller-supplied chunk 'target',
 * shrinking target->length if less data was available. A suspended
 * sink just silences the target. */
void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t length, block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
        pa_sink_unref(s);
        return;
    }

    length = target->length;
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* No active inputs: write silence. */
        if (target->length > length)
            target->length = length;

        pa_silence_memchunk(target, &s->sample_spec);
    } else if (n == 1) {
        pa_cvolume volume;

        if (target->length > length)
            target->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
            pa_silence_memchunk(target, &s->sample_spec);
        else {
            pa_memchunk vchunk;

            /* Copy the single input's chunk into the target, scaling
             * it first if the effective volume is not 100%. */
            vchunk = info[0].chunk;
            pa_memblock_ref(vchunk.memblock);

            if (vchunk.length > length)
                vchunk.length = length;

            if (!pa_cvolume_is_norm(&volume)) {
                pa_memchunk_make_writable(&vchunk, 0);
                pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
            }

            pa_memchunk_memcpy(target, &vchunk);
            pa_memblock_unref(vchunk.memblock);
        }

    } else {
        /* Multiple inputs: mix straight into the target's memory. */
        void *ptr;

        ptr = pa_memblock_acquire(target->memblock);

        target->length = pa_mix(info, n,
                                (uint8_t*) ptr + target->index, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(target->memblock);
    }

    inputs_drop(s, info, n, target);

    pa_sink_unref(s);
}
1049
1050 /* Called from IO thread context */
1051 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1052 pa_memchunk chunk;
1053 size_t l, d;
1054
1055 pa_sink_assert_ref(s);
1056 pa_sink_assert_io_context(s);
1057 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1058 pa_assert(target);
1059 pa_assert(target->memblock);
1060 pa_assert(target->length > 0);
1061 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1062
1063 pa_sink_ref(s);
1064
1065 pa_assert(!s->thread_info.rewind_requested);
1066 pa_assert(s->thread_info.rewind_nbytes == 0);
1067
1068 l = target->length;
1069 d = 0;
1070 while (l > 0) {
1071 chunk = *target;
1072 chunk.index += d;
1073 chunk.length -= d;
1074
1075 pa_sink_render_into(s, &chunk);
1076
1077 d += chunk.length;
1078 l -= chunk.length;
1079 }
1080
1081 pa_sink_unref(s);
1082 }
1083
/* Called from IO thread context. Renders exactly 'length' bytes of
 * audio into *result: first the connected inputs are mixed, and if
 * they delivered fewer than 'length' bytes the remainder is filled up
 * with further pa_sink_render_into() calls. A suspended sink produces
 * silence. On return the caller owns a reference to result->memblock. */
void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    size_t length1st = length;
    unsigned n;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(length > 0);
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    /* A suspended sink plays back silence only */
    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk_get(&s->core->silence_cache,
                                s->core->mempool,
                                result,
                                &s->sample_spec,
                                length1st);

        pa_sink_unref(s);
        return;
    }

    /* Collect the pending chunks of all inputs; fill_mix_info() may
     * shorten length1st to what the inputs can deliver right now */
    n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* No input streams at all: hand out cached silence */
        pa_silence_memchunk_get(&s->core->silence_cache,
                                s->core->mempool,
                                result,
                                &s->sample_spec,
                                length1st);
    } else if (n == 1) {
        /* Exactly one input stream: skip mixing and pass the chunk
         * through, applying soft volume/mute as needed */
        pa_cvolume volume;

        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
            if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
                /* Fully muted: replace the chunk with cached silence */
                pa_memblock_unref(result->memblock);
                pa_silence_memchunk_get(&s->core->silence_cache,
                                        s->core->mempool,
                                        result,
                                        &s->sample_spec,
                                        result->length);
            } else {
                /* Apply the volume in place; this needs a writable
                 * (i.e. private) copy of the memblock */
                pa_memchunk_make_writable(result, length);
                pa_volume_memchunk(result, &s->sample_spec, &volume);
            }
        }
    } else {
        /* Several input streams: mix them into a fresh memblock */
        void *ptr;

        result->index = 0;
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);

        result->length = pa_mix(info, n,
                                (uint8_t*) ptr + result->index, length1st,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(result->memblock);
    }

    inputs_drop(s, info, n, result);

    /* The inputs delivered less than requested: render the remainder
     * in further passes */
    if (result->length < length) {
        pa_memchunk chunk;
        size_t l, d;
        pa_memchunk_make_writable(result, length);

        l = length - result->length;
        d = result->index + result->length;
        while (l > 0) {
            chunk = *result;
            chunk.index = d;
            chunk.length = l;

            pa_sink_render_into(s, &chunk);

            /* pa_sink_render_into() updates chunk.length to the number
             * of bytes it actually rendered */
            d += chunk.length;
            l -= chunk.length;
        }
        result->length = length;
    }

    pa_sink_unref(s);
}
1186
1187 /* Called from main thread */
1188 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1189 pa_usec_t usec = 0;
1190
1191 pa_sink_assert_ref(s);
1192 pa_assert_ctl_context();
1193 pa_assert(PA_SINK_IS_LINKED(s->state));
1194
1195 /* The returned value is supposed to be in the time domain of the sound card! */
1196
1197 if (s->state == PA_SINK_SUSPENDED)
1198 return 0;
1199
1200 if (!(s->flags & PA_SINK_LATENCY))
1201 return 0;
1202
1203 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1204
1205 return usec;
1206 }
1207
1208 /* Called from IO thread */
1209 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1210 pa_usec_t usec = 0;
1211 pa_msgobject *o;
1212
1213 pa_sink_assert_ref(s);
1214 pa_sink_assert_io_context(s);
1215 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1216
1217 /* The returned value is supposed to be in the time domain of the sound card! */
1218
1219 if (s->thread_info.state == PA_SINK_SUSPENDED)
1220 return 0;
1221
1222 if (!(s->flags & PA_SINK_LATENCY))
1223 return 0;
1224
1225 o = PA_MSGOBJECT(s);
1226
1227 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1228
1229 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1230 return -1;
1231
1232 return usec;
1233 }
1234
/* Called from main context. Recomputes i->reference_ratio for every
 * input connected to this sink. Only valid in flat volume mode. */
static void compute_reference_ratios(pa_sink *s) {
    uint32_t idx;
    pa_sink_input *i;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        /*
         * Calculates the reference ratio from the sink's reference
         * volume. This basically calculates:
         *
         * i->reference_ratio = i->volume / s->reference_volume
         */

        /* Bring the sink's volume into the stream's channel map first */
        remapped = s->reference_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->reference_ratio.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            /* We don't update when the sink volume is 0 anyway */
            if (remapped.values[c] <= PA_VOLUME_MUTED)
                continue;

            /* Don't update the reference ratio unless necessary */
            if (pa_sw_volume_multiply(
                        i->reference_ratio.values[c],
                        remapped.values[c]) == i->volume.values[c])
                continue;

            i->reference_ratio.values[c] = pa_sw_volume_divide(
                    i->volume.values[c],
                    remapped.values[c]);
        }
    }
}
1279
/* Called from main context. Recomputes i->real_ratio and
 * i->soft_volume for every input connected to this sink. Only valid
 * in flat volume mode. The thread_info copies are NOT updated here;
 * that is left to the caller. */
static void compute_real_ratios(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        /* Bring the sink's real volume into the stream's channel map */
        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->real_ratio.channels = i->sample_spec.channels;
        i->soft_volume.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave i->real_ratio untouched */
                i->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                        i->real_ratio.values[c],
                        remapped.values[c]) != i->volume.values[c])

                i->real_ratio.values[c] = pa_sw_volume_divide(
                        i->volume.values[c],
                        remapped.values[c]);

            i->soft_volume.values[c] = pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    i->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
1333
/* Called from main thread. Determines the maximum volume of all
 * streams and sets s->real_volume accordingly, then refreshes the
 * per-stream real ratios/soft volumes. Only valid in flat volume
 * mode. */
static void compute_real_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (pa_idxset_isempty(s->inputs)) {
        /* In the special case that we have no sink input we leave the
         * volume unmodified. */
        s->real_volume = s->reference_volume;
        return;
    }

    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all inputs
     * connected to this sink */
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume remapped;

        /* Remap the stream volume into the sink's channel map before
         * merging it into the maximum */
        remapped = i->volume;
        pa_cvolume_remap(&remapped, &i->channel_map, &s->channel_map);
        pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
    }

    /* Then, let's update the real ratios/soft volumes of all inputs
     * connected to this sink */
    compute_real_ratios(s);
}
1370
/* Called from main thread. Recalculates the volume of every sink
 * input from the sink's reference volume and the stream's reference
 * ratio, firing change notifications where the result differs. Only
 * valid in flat volume mode. */
static void propagate_reference_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    /* This is called whenever the sink volume changes that is not
     * caused by a sink input volume change. We need to fix up the
     * sink input volumes accordingly */

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume old_volume, remapped;

        old_volume = i->volume;

        /* This basically calculates:
         *
         * i->volume := s->reference_volume * i->reference_ratio */

        remapped = s->reference_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
        pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);

        /* The volume changed, let's tell people so */
        if (!pa_cvolume_equal(&old_volume, &i->volume)) {

            if (i->volume_changed)
                i->volume_changed(i);

            pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
        }
    }
}
1408
/* Called from main thread. Sets the sink's reference volume (or, when
 * 'volume' is NULL, resynchronizes the sink's real and reference
 * volumes from the stream volumes, which is only meaningful in flat
 * volume mode). 'sendmsg' controls whether the IO thread is notified;
 * 'save' marks the volume for persistent storage. */
void pa_sink_set_volume(
        pa_sink *s,
        const pa_cvolume *volume,
        pa_bool_t sendmsg,
        pa_bool_t save) {

    pa_cvolume old_reference_volume;
    pa_bool_t reference_changed;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* As a special exception we accept mono volumes on all sinks --
     * even on those with more complex channel maps */

    /* If volume is NULL we synchronize the sink's real and reference
     * volumes with the stream volumes. If it is not NULL we update
     * the reference_volume with it. */

    old_reference_volume = s->reference_volume;

    if (volume) {

        if (pa_cvolume_compatible(volume, &s->sample_spec))
            s->reference_volume = *volume;
        else
            /* Mono volume: apply it equally to all channels */
            pa_cvolume_scale(&s->reference_volume, pa_cvolume_max(volume));

        if (s->flags & PA_SINK_FLAT_VOLUME) {
            /* OK, propagate this volume change back to the inputs */
            propagate_reference_volume(s);

            /* And now recalculate the real volume */
            compute_real_volume(s);
        } else
            s->real_volume = s->reference_volume;

    } else {
        pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

        /* Ok, let's determine the new real volume */
        compute_real_volume(s);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);

        /* We need to fix the reference ratios of all streams now that
         * we changed the reference volume */
        compute_reference_ratios(s);
    }

    reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
    s->save_volume = (!reference_changed && s->save_volume) || save;

    if (s->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to s->soft_volume */

        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
        s->set_volume(s);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the virtual volume */
        s->soft_volume = s->real_volume;

    /* This tells the sink that soft and/or virtual volume changed */
    if (sendmsg)
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);

    if (reference_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1488
1489 /* Called from main thread. Only to be called by sink implementor */
1490 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1491 pa_sink_assert_ref(s);
1492 pa_assert_ctl_context();
1493
1494 if (!volume)
1495 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1496 else
1497 s->soft_volume = *volume;
1498
1499 if (PA_SINK_IS_LINKED(s->state))
1500 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1501 else
1502 s->thread_info.soft_volume = s->soft_volume;
1503 }
1504
/* Called from main thread. Handles an externally caused change of the
 * hardware's real volume: the real volume is copied into the
 * reference volume, and in flat volume mode the stream volumes are
 * rebuilt from i->real_ratio (which stays fixed), firing change
 * notifications as needed. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    pa_sink_input *i;
    uint32_t idx;
    pa_cvolume old_reference_volume;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (pa_cvolume_equal(old_real_volume, &s->real_volume))
        return;

    old_reference_volume = s->reference_volume;

    /* 1. Make the real volume the reference volume */
    s->reference_volume = s->real_volume;

    if (s->flags & PA_SINK_FLAT_VOLUME) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume old_volume, remapped;

            old_volume = i->volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            i->reference_ratio = i->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            remapped = s->reference_volume;
            pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &i->volume)) {

                if (i->volume_changed)
                    i->volume_changed(i);

                pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
            }
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    s->save_volume = TRUE;

    if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1569
1570 /* Called from main thread */
1571 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1572 pa_sink_assert_ref(s);
1573 pa_assert_ctl_context();
1574 pa_assert(PA_SINK_IS_LINKED(s->state));
1575
1576 if (s->refresh_volume || force_refresh) {
1577 struct pa_cvolume old_real_volume;
1578
1579 old_real_volume = s->real_volume;
1580
1581 if (s->get_volume)
1582 s->get_volume(s);
1583
1584 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1585
1586 propagate_real_volume(s, &old_real_volume);
1587 }
1588
1589 return &s->reference_volume;
1590 }
1591
1592 /* Called from main thread */
1593 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1594 pa_cvolume old_real_volume;
1595
1596 pa_sink_assert_ref(s);
1597 pa_assert_ctl_context();
1598 pa_assert(PA_SINK_IS_LINKED(s->state));
1599
1600 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1601
1602 old_real_volume = s->real_volume;
1603 s->real_volume = *new_real_volume;
1604
1605 propagate_real_volume(s, &old_real_volume);
1606 }
1607
1608 /* Called from main thread */
1609 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1610 pa_bool_t old_muted;
1611
1612 pa_sink_assert_ref(s);
1613 pa_assert_ctl_context();
1614 pa_assert(PA_SINK_IS_LINKED(s->state));
1615
1616 old_muted = s->muted;
1617 s->muted = mute;
1618 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1619
1620 if (s->set_mute)
1621 s->set_mute(s);
1622
1623 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1624
1625 if (old_muted != s->muted)
1626 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1627 }
1628
1629 /* Called from main thread */
1630 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1631
1632 pa_sink_assert_ref(s);
1633 pa_assert_ctl_context();
1634 pa_assert(PA_SINK_IS_LINKED(s->state));
1635
1636 if (s->refresh_muted || force_refresh) {
1637 pa_bool_t old_muted = s->muted;
1638
1639 if (s->get_mute)
1640 s->get_mute(s);
1641
1642 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1643
1644 if (old_muted != s->muted) {
1645 s->save_muted = TRUE;
1646
1647 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1648
1649 /* Make sure the soft mute status stays in sync */
1650 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1651 }
1652 }
1653
1654 return s->muted;
1655 }
1656
1657 /* Called from main thread */
1658 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1659 pa_sink_assert_ref(s);
1660 pa_assert_ctl_context();
1661 pa_assert(PA_SINK_IS_LINKED(s->state));
1662
1663 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1664
1665 if (s->muted == new_muted)
1666 return;
1667
1668 s->muted = new_muted;
1669 s->save_muted = TRUE;
1670
1671 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1672 }
1673
1674 /* Called from main thread */
1675 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1676 pa_sink_assert_ref(s);
1677 pa_assert_ctl_context();
1678
1679 if (p)
1680 pa_proplist_update(s->proplist, mode, p);
1681
1682 if (PA_SINK_IS_LINKED(s->state)) {
1683 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1684 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1685 }
1686
1687 return TRUE;
1688 }
1689
1690 /* Called from main thread */
1691 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1692 void pa_sink_set_description(pa_sink *s, const char *description) {
1693 const char *old;
1694 pa_sink_assert_ref(s);
1695 pa_assert_ctl_context();
1696
1697 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1698 return;
1699
1700 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1701
1702 if (old && description && pa_streq(old, description))
1703 return;
1704
1705 if (description)
1706 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1707 else
1708 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1709
1710 if (s->monitor_source) {
1711 char *n;
1712
1713 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1714 pa_source_set_description(s->monitor_source, n);
1715 pa_xfree(n);
1716 }
1717
1718 if (PA_SINK_IS_LINKED(s->state)) {
1719 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1720 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1721 }
1722 }
1723
1724 /* Called from main thread */
1725 unsigned pa_sink_linked_by(pa_sink *s) {
1726 unsigned ret;
1727
1728 pa_sink_assert_ref(s);
1729 pa_assert_ctl_context();
1730 pa_assert(PA_SINK_IS_LINKED(s->state));
1731
1732 ret = pa_idxset_size(s->inputs);
1733
1734 /* We add in the number of streams connected to us here. Please
1735 * note the asymmmetry to pa_sink_used_by()! */
1736
1737 if (s->monitor_source)
1738 ret += pa_source_linked_by(s->monitor_source);
1739
1740 return ret;
1741 }
1742
1743 /* Called from main thread */
1744 unsigned pa_sink_used_by(pa_sink *s) {
1745 unsigned ret;
1746
1747 pa_sink_assert_ref(s);
1748 pa_assert_ctl_context();
1749 pa_assert(PA_SINK_IS_LINKED(s->state));
1750
1751 ret = pa_idxset_size(s->inputs);
1752 pa_assert(ret >= s->n_corked);
1753
1754 /* Streams connected to our monitor source do not matter for
1755 * pa_sink_used_by()!.*/
1756
1757 return ret - s->n_corked;
1758 }
1759
1760 /* Called from main thread */
1761 unsigned pa_sink_check_suspend(pa_sink *s) {
1762 unsigned ret;
1763 pa_sink_input *i;
1764 uint32_t idx;
1765
1766 pa_sink_assert_ref(s);
1767 pa_assert_ctl_context();
1768
1769 if (!PA_SINK_IS_LINKED(s->state))
1770 return 0;
1771
1772 ret = 0;
1773
1774 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1775 pa_sink_input_state_t st;
1776
1777 st = pa_sink_input_get_state(i);
1778 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1779
1780 if (st == PA_SINK_INPUT_CORKED)
1781 continue;
1782
1783 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1784 continue;
1785
1786 ret ++;
1787 }
1788
1789 if (s->monitor_source)
1790 ret += pa_source_check_suspend(s->monitor_source);
1791
1792 return ret;
1793 }
1794
1795 /* Called from the IO thread */
1796 static void sync_input_volumes_within_thread(pa_sink *s) {
1797 pa_sink_input *i;
1798 void *state = NULL;
1799
1800 pa_sink_assert_ref(s);
1801 pa_sink_assert_io_context(s);
1802
1803 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1804 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1805 continue;
1806
1807 i->thread_info.soft_volume = i->soft_volume;
1808 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1809 }
1810 }
1811
/* Called from IO thread, except when it is not. Generic message
 * handler for the sink object; sink implementations chain up to this
 * from their own process_msg(). Returns 0 on success, -1 for message
 * codes that must be handled by the implementor (e.g. GET_LATENCY). */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Mirror the sync_prev/sync_next links into thread_info */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be
             * fixed up and then configured on the sink */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming. */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unlink this input from its thread_info sync chain */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                /* Rewind back the data still queued for this stream so
                 * it can be replayed on the destination sink */
                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                /* Drop what is already queued in the device so the
                 * moved stream starts playing right away */
                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            /* In non-flat mode there are no per-stream volumes to
             * synchronize */
            if (!(s->flags & PA_SINK_FLAT_VOLUME))
                return 0;

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:
            /* Nothing to do by default; implementors may override */
            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:
            /* Nothing to do by default; implementors may override */
            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* TRUE when we cross the suspended/opened boundary in
             * either direction */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            /* A pending rewind makes no sense while suspended */
            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                /* Let all inputs know about the suspend state change */
                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            /* r[0] = min latency, r[1] = max latency */
            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            /* These must be handled by the sink implementor; falling
             * through here returns -1 below */
            ;
    }

    return -1;
}
2138
2139 /* Called from main thread */
2140 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2141 pa_sink *sink;
2142 uint32_t idx;
2143 int ret = 0;
2144
2145 pa_core_assert_ref(c);
2146 pa_assert_ctl_context();
2147 pa_assert(cause != 0);
2148
2149 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2150 int r;
2151
2152 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2153 ret = r;
2154 }
2155
2156 return ret;
2157 }
2158
/* Called from main thread */
void pa_sink_detach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Synchronously dispatch PA_SINK_MESSAGE_DETACH to the IO thread
     * (handled via pa_sink_detach_within_thread()) and wait for it */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
2167
/* Called from main thread */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Synchronously dispatch PA_SINK_MESSAGE_ATTACH to the IO thread
     * (handled via pa_sink_attach_within_thread()) and wait for it */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2176
2177 /* Called from IO thread */
2178 void pa_sink_detach_within_thread(pa_sink *s) {
2179 pa_sink_input *i;
2180 void *state = NULL;
2181
2182 pa_sink_assert_ref(s);
2183 pa_sink_assert_io_context(s);
2184 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2185
2186 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2187 if (i->detach)
2188 i->detach(i);
2189
2190 if (s->monitor_source)
2191 pa_source_detach_within_thread(s->monitor_source);
2192 }
2193
2194 /* Called from IO thread */
2195 void pa_sink_attach_within_thread(pa_sink *s) {
2196 pa_sink_input *i;
2197 void *state = NULL;
2198
2199 pa_sink_assert_ref(s);
2200 pa_sink_assert_io_context(s);
2201 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2202
2203 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2204 if (i->attach)
2205 i->attach(i);
2206
2207 if (s->monitor_source)
2208 pa_source_attach_within_thread(s->monitor_source);
2209 }
2210
2211 /* Called from IO thread */
2212 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2213 pa_sink_assert_ref(s);
2214 pa_sink_assert_io_context(s);
2215 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2216
2217 if (s->thread_info.state == PA_SINK_SUSPENDED)
2218 return;
2219
2220 if (nbytes == (size_t) -1)
2221 nbytes = s->thread_info.max_rewind;
2222
2223 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2224
2225 if (s->thread_info.rewind_requested &&
2226 nbytes <= s->thread_info.rewind_nbytes)
2227 return;
2228
2229 s->thread_info.rewind_nbytes = nbytes;
2230 s->thread_info.rewind_requested = TRUE;
2231
2232 if (s->request_rewind)
2233 s->request_rewind(s);
2234 }
2235
/* Called from IO thread */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Sinks without dynamic latency simply report their fixed latency,
     * clamped into the configured range */
    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    /* Use the cached answer while it is still valid (it is invalidated
     * via pa_sink_invalidate_requested_latency()) */
    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Otherwise recompute: the smallest latency any stream asked for...
     * ((pa_usec_t) -1 throughout means "nothing requested") */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* ...possibly lowered further by the monitor source's request */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    /* Clamp a real request into the allowed range */
    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
2274
/* Called from main thread */
pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* A suspended sink reports no requested latency */
    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    /* Ask the IO thread; the message handler substitutes max_latency
     * when no explicit latency was requested, so the main thread never
     * sees (pa_usec_t) -1 (see PA_SINK_MESSAGE_GET_REQUESTED_LATENCY) */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
    return usec;
}
2289
2290 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2291 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2292 pa_sink_input *i;
2293 void *state = NULL;
2294
2295 pa_sink_assert_ref(s);
2296 pa_sink_assert_io_context(s);
2297
2298 if (max_rewind == s->thread_info.max_rewind)
2299 return;
2300
2301 s->thread_info.max_rewind = max_rewind;
2302
2303 if (PA_SINK_IS_LINKED(s->thread_info.state))
2304 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2305 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2306
2307 if (s->monitor_source)
2308 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2309 }
2310
/* Called from main thread */
void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Once the sink is linked, thread_info belongs to the IO thread, so
     * go through the message queue; before that we may set it directly */
    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
    else
        pa_sink_set_max_rewind_within_thread(s, max_rewind);
}
2321
2322 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2323 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2324 void *state = NULL;
2325
2326 pa_sink_assert_ref(s);
2327 pa_sink_assert_io_context(s);
2328
2329 if (max_request == s->thread_info.max_request)
2330 return;
2331
2332 s->thread_info.max_request = max_request;
2333
2334 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2335 pa_sink_input *i;
2336
2337 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2338 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2339 }
2340 }
2341
/* Called from main thread */
void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Once the sink is linked, thread_info belongs to the IO thread, so
     * go through the message queue; before that we may set it directly */
    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
    else
        pa_sink_set_max_request_within_thread(s, max_request);
}
2352
2353 /* Called from IO thread */
2354 void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
2355 pa_sink_input *i;
2356 void *state = NULL;
2357
2358 pa_sink_assert_ref(s);
2359 pa_sink_assert_io_context(s);
2360
2361 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
2362 s->thread_info.requested_latency_valid = FALSE;
2363 else if (dynamic)
2364 return;
2365
2366 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2367
2368 if (s->update_requested_latency)
2369 s->update_requested_latency(s);
2370
2371 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2372 if (i->update_sink_requested_latency)
2373 i->update_sink_requested_latency(i);
2374 }
2375 }
2376
2377 /* Called from main thread */
2378 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2379 pa_sink_assert_ref(s);
2380 pa_assert_ctl_context();
2381
2382 /* min_latency == 0: no limit
2383 * min_latency anything else: specified limit
2384 *
2385 * Similar for max_latency */
2386
2387 if (min_latency < ABSOLUTE_MIN_LATENCY)
2388 min_latency = ABSOLUTE_MIN_LATENCY;
2389
2390 if (max_latency <= 0 ||
2391 max_latency > ABSOLUTE_MAX_LATENCY)
2392 max_latency = ABSOLUTE_MAX_LATENCY;
2393
2394 pa_assert(min_latency <= max_latency);
2395
2396 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2397 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2398 max_latency == ABSOLUTE_MAX_LATENCY) ||
2399 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2400
2401 if (PA_SINK_IS_LINKED(s->state)) {
2402 pa_usec_t r[2];
2403
2404 r[0] = min_latency;
2405 r[1] = max_latency;
2406
2407 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2408 } else
2409 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2410 }
2411
2412 /* Called from main thread */
2413 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2414 pa_sink_assert_ref(s);
2415 pa_assert_ctl_context();
2416 pa_assert(min_latency);
2417 pa_assert(max_latency);
2418
2419 if (PA_SINK_IS_LINKED(s->state)) {
2420 pa_usec_t r[2] = { 0, 0 };
2421
2422 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2423
2424 *min_latency = r[0];
2425 *max_latency = r[1];
2426 } else {
2427 *min_latency = s->thread_info.min_latency;
2428 *max_latency = s->thread_info.max_latency;
2429 }
2430 }
2431
/* Called from IO thread */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Callers must already have normalized the range, see
     * pa_sink_set_latency_range() */
    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    /* No change, nothing to propagate */
    if (s->thread_info.min_latency == min_latency &&
        s->thread_info.max_latency == max_latency)
        return;

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    /* Tell all attached streams about the new range */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    /* The cached requested latency may no longer fit the new range */
    pa_sink_invalidate_requested_latency(s, FALSE);

    /* Keep the monitor source in sync */
    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
2466
2467 /* Called from main thread */
2468 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2469 pa_sink_assert_ref(s);
2470 pa_assert_ctl_context();
2471
2472 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2473 pa_assert(latency == 0);
2474 return;
2475 }
2476
2477 if (latency < ABSOLUTE_MIN_LATENCY)
2478 latency = ABSOLUTE_MIN_LATENCY;
2479
2480 if (latency > ABSOLUTE_MAX_LATENCY)
2481 latency = ABSOLUTE_MAX_LATENCY;
2482
2483 if (PA_SINK_IS_LINKED(s->state))
2484 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2485 else
2486 s->thread_info.fixed_latency = latency;
2487
2488 pa_source_set_fixed_latency(s->monitor_source, latency);
2489 }
2490
2491 /* Called from main thread */
2492 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2493 pa_usec_t latency;
2494
2495 pa_sink_assert_ref(s);
2496 pa_assert_ctl_context();
2497
2498 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2499 return 0;
2500
2501 if (PA_SINK_IS_LINKED(s->state))
2502 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2503 else
2504 latency = s->thread_info.fixed_latency;
2505
2506 return latency;
2507 }
2508
/* Called from IO thread */
void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Dynamic-latency sinks have no fixed latency; callers must pass 0 */
    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    /* Callers must already have clamped the value, see
     * pa_sink_set_fixed_latency() */
    pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(latency <= ABSOLUTE_MAX_LATENCY);

    /* No change, nothing to propagate */
    if (s->thread_info.fixed_latency == latency)
        return;

    s->thread_info.fixed_latency = latency;

    /* Tell all attached streams about the change */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_fixed_latency)
                i->update_sink_fixed_latency(i);
    }

    /* The cached requested latency is derived from the fixed latency */
    pa_sink_invalidate_requested_latency(s, FALSE);

    /* Keep the monitor source in sync */
    pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
}
2540
2541 /* Called from main context */
2542 size_t pa_sink_get_max_rewind(pa_sink *s) {
2543 size_t r;
2544 pa_sink_assert_ref(s);
2545 pa_assert_ctl_context();
2546
2547 if (!PA_SINK_IS_LINKED(s->state))
2548 return s->thread_info.max_rewind;
2549
2550 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2551
2552 return r;
2553 }
2554
2555 /* Called from main context */
2556 size_t pa_sink_get_max_request(pa_sink *s) {
2557 size_t r;
2558 pa_sink_assert_ref(s);
2559 pa_assert_ctl_context();
2560
2561 if (!PA_SINK_IS_LINKED(s->state))
2562 return s->thread_info.max_request;
2563
2564 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2565
2566 return r;
2567 }
2568
2569 /* Called from main context */
2570 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2571 pa_device_port *port;
2572
2573 pa_sink_assert_ref(s);
2574 pa_assert_ctl_context();
2575
2576 if (!s->set_port) {
2577 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2578 return -PA_ERR_NOTIMPLEMENTED;
2579 }
2580
2581 if (!s->ports)
2582 return -PA_ERR_NOENTITY;
2583
2584 if (!(port = pa_hashmap_get(s->ports, name)))
2585 return -PA_ERR_NOENTITY;
2586
2587 if (s->active_port == port) {
2588 s->save_port = s->save_port || save;
2589 return 0;
2590 }
2591
2592 if ((s->set_port(s, port)) < 0)
2593 return -PA_ERR_NOENTITY;
2594
2595 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2596
2597 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2598
2599 s->active_port = port;
2600 s->save_port = save;
2601
2602 return 0;
2603 }
2604
2605 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2606 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2607
2608 pa_assert(p);
2609
2610 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2611 return TRUE;
2612
2613 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2614
2615 if (pa_streq(ff, "microphone"))
2616 t = "audio-input-microphone";
2617 else if (pa_streq(ff, "webcam"))
2618 t = "camera-web";
2619 else if (pa_streq(ff, "computer"))
2620 t = "computer";
2621 else if (pa_streq(ff, "handset"))
2622 t = "phone";
2623 else if (pa_streq(ff, "portable"))
2624 t = "multimedia-player";
2625 else if (pa_streq(ff, "tv"))
2626 t = "video-display";
2627
2628 /*
2629 * The following icons are not part of the icon naming spec,
2630 * because Rodney Dawes sucks as the maintainer of that spec.
2631 *
2632 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2633 */
2634 else if (pa_streq(ff, "headset"))
2635 t = "audio-headset";
2636 else if (pa_streq(ff, "headphone"))
2637 t = "audio-headphones";
2638 else if (pa_streq(ff, "speaker"))
2639 t = "audio-speakers";
2640 else if (pa_streq(ff, "hands-free"))
2641 t = "audio-handsfree";
2642 }
2643
2644 if (!t)
2645 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2646 if (pa_streq(c, "modem"))
2647 t = "modem";
2648
2649 if (!t) {
2650 if (is_sink)
2651 t = "audio-card";
2652 else
2653 t = "audio-input-microphone";
2654 }
2655
2656 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2657 if (strstr(profile, "analog"))
2658 s = "-analog";
2659 else if (strstr(profile, "iec958"))
2660 s = "-iec958";
2661 else if (strstr(profile, "hdmi"))
2662 s = "-hdmi";
2663 }
2664
2665 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2666
2667 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2668
2669 return TRUE;
2670 }
2671
2672 pa_bool_t pa_device_init_description(pa_proplist *p) {
2673 const char *s, *d = NULL, *k;
2674 pa_assert(p);
2675
2676 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2677 return TRUE;
2678
2679 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2680 if (pa_streq(s, "internal"))
2681 d = _("Internal Audio");
2682
2683 if (!d)
2684 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2685 if (pa_streq(s, "modem"))
2686 d = _("Modem");
2687
2688 if (!d)
2689 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2690
2691 if (!d)
2692 return FALSE;
2693
2694 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2695
2696 if (d && k)
2697 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2698 else if (d)
2699 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2700
2701 return TRUE;
2702 }
2703
2704 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2705 const char *s;
2706 pa_assert(p);
2707
2708 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2709 return TRUE;
2710
2711 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2712 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2713 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2714 return TRUE;
2715 }
2716
2717 return FALSE;
2718 }
2719
2720 unsigned pa_device_init_priority(pa_proplist *p) {
2721 const char *s;
2722 unsigned priority = 0;
2723
2724 pa_assert(p);
2725
2726 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
2727
2728 if (pa_streq(s, "sound"))
2729 priority += 9000;
2730 else if (!pa_streq(s, "modem"))
2731 priority += 1000;
2732 }
2733
2734 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2735
2736 if (pa_streq(s, "internal"))
2737 priority += 900;
2738 else if (pa_streq(s, "speaker"))
2739 priority += 500;
2740 else if (pa_streq(s, "headphone"))
2741 priority += 400;
2742 }
2743
2744 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
2745
2746 if (pa_streq(s, "pci"))
2747 priority += 50;
2748 else if (pa_streq(s, "usb"))
2749 priority += 40;
2750 else if (pa_streq(s, "bluetooth"))
2751 priority += 30;
2752 }
2753
2754 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2755
2756 if (pa_startswith(s, "analog-"))
2757 priority += 9;
2758 else if (pa_startswith(s, "iec958-"))
2759 priority += 8;
2760 }
2761
2762 return priority;
2763 }