/* src/pulsecore/sink.c (PulseAudio) — "core: rework how stream volumes affect sink volumes" */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
54
55 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 pa_zero(*data);
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
149
/* Called from main context */
/* Drop every implementor-supplied callback; the sink falls back to pure
 * software behaviour until a backend installs new hooks. Used both at
 * construction time and when unlinking. */
static void reset_callbacks(pa_sink *s) {
    pa_assert(s);

    s->set_state = NULL;
    s->get_volume = NULL;
    s->set_volume = NULL;
    s->get_mute = NULL;
    s->set_mute = NULL;
    s->request_rewind = NULL;
    s->update_requested_latency = NULL;
    s->set_port = NULL;
}
163
/* Called from main context */
/* Construct a new sink from the fields collected in 'data', register it
 * with the core, and create its companion monitor source. Returns NULL on
 * failure (name clash, hook veto, invalid data, or monitor creation
 * failure). The sink is in PA_SINK_INIT state until pa_sink_put(). */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* Reserve the name in the registry; it may be canonicalized, so the
     * registered form is written back into 'data'. */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    /* Give modules a chance to veto or adjust the new sink. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Fill in defaults for anything the caller left unset: channel map,
     * initial volume and mute state. */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    if (!data->volume_is_set)
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit the card's properties, then derive description, icon and
     * intended roles from them. */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->priority = pa_device_init_priority(s->proplist);

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;

    /* reference_volume is what clients see; real_volume mirrors what the
     * device is actually set to; soft_volume starts out neutral. */
    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* No port requested (or it doesn't exist): fall back to the port with
     * the highest priority. */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* Initialize the IO-thread-side mirror of this sink's state. */
    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    /* FIXME: This should probably be moved to pa_sink_put() */
    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Every sink gets a companion monitor source that taps the audio the
     * sink plays back. */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    /* Keep the monitor's latency window in sync with ours. */
    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
360
/* Called from main context */
/* Transition the sink to 'state'. The implementor's set_state() callback
 * runs first; then the IO thread is informed synchronously. If the IO
 * thread rejects the change, the implementor's change is rolled back.
 * Fires state-change hooks and notifies inputs about suspend/resume. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* TRUE only when this transition crosses into or out of SUSPENDED;
     * only such transitions are announced to the inputs below. */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    /* Inform the IO thread; on failure, undo the implementor's change. */
    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        /* Inputs flagged KILL_ON_SUSPEND are terminated instead of being
         * suspended alongside the sink. */
        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
418
/* Called from main context */
/* Complete sink construction after the implementor filled in its fields:
 * fix up volume flags, verify implementor invariants, move the sink to
 * IDLE, publish the monitor source, and announce the new sink. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow volume related flags to be set
     * between _new() and _put(). */

    /* Pure software sinks always work in decibel space. */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
        s->flags |= PA_SINK_DECIBEL_VOLUME;

    if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
        s->flags |= PA_SINK_FLAT_VOLUME;

    /* We assume that if the sink implementor changed the default
     * volume he did so in real_volume, because that is the usual
     * place where he is supposed to place his changes. */
    s->reference_volume = s->real_volume;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;

    /* Sanity-check the flag/field combinations the implementor set up. */
    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    /* The monitor source must mirror our latency configuration exactly. */
    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
465
/* Called from main context */
/* Disconnect the sink from the core: fire unlink hooks, drop registry and
 * idxset entries, kill all inputs, move to UNLINKED, clear callbacks and
 * unlink the monitor source. Safe to call more than once. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Kill every remaining input. 'j' remembers the previous victim: the
     * assert guards against an input that refuses to disappear, which
     * would otherwise loop forever. */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    /* Only go through the full state machine if we were actually linked;
     * otherwise just stamp the state. */
    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
515
/* Called from main context */
/* pa_object destructor: runs when the last reference is dropped. Unlinks
 * the sink if needed, then releases every owned resource. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);
    pa_sink_input *i;

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    pa_idxset_free(s->inputs, NULL, NULL);

    /* Drop the IO-thread-side references held on the inputs. */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
562
563 /* Called from main context, and not while the IO thread is active, please */
564 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
565 pa_sink_assert_ref(s);
566 pa_assert_ctl_context();
567
568 s->asyncmsgq = q;
569
570 if (s->monitor_source)
571 pa_source_set_asyncmsgq(s->monitor_source, q);
572 }
573
574 /* Called from main context, and not while the IO thread is active, please */
575 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
576 pa_sink_assert_ref(s);
577 pa_assert_ctl_context();
578
579 if (mask == 0)
580 return;
581
582 /* For now, allow only a minimal set of flags to be changed. */
583 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
584
585 s->flags = (s->flags & ~mask) | (value & mask);
586
587 pa_source_update_flags(s->monitor_source,
588 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
589 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
590 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
591 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
592 }
593
594 /* Called from IO context, or before _put() from main context */
595 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
596 pa_sink_assert_ref(s);
597 pa_sink_assert_io_context(s);
598
599 s->thread_info.rtpoll = p;
600
601 if (s->monitor_source)
602 pa_source_set_rtpoll(s->monitor_source, p);
603 }
604
605 /* Called from main context */
606 int pa_sink_update_status(pa_sink*s) {
607 pa_sink_assert_ref(s);
608 pa_assert_ctl_context();
609 pa_assert(PA_SINK_IS_LINKED(s->state));
610
611 if (s->state == PA_SINK_SUSPENDED)
612 return 0;
613
614 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
615 }
616
/* Called from main context */
/* Add or remove a suspend cause. suspend_cause is a bitmask: the sink is
 * suspended as long as at least one cause bit remains set, and resumes
 * (to RUNNING or IDLE) only when the last cause is cleared. The monitor
 * source tracks the same cause set. Returns sink_set_state()'s result, or
 * 0 when the effective state does not change. */
int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (suspend) {
        s->suspend_cause |= cause;
        s->monitor_source->suspend_cause |= cause;
    } else {
        s->suspend_cause &= ~cause;
        s->monitor_source->suspend_cause &= ~cause;
    }

    /* Nothing to do if the actual state already agrees with the cause set. */
    if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return sink_set_state(s, PA_SINK_SUSPENDED);
    else
        return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
642
/* Called from main context */
/* Begin moving every input away from this sink. Each successfully
 * detached input is pushed (with a reference held) onto 'q', to be
 * reattached later via pa_sink_move_all_finish() or aborted via
 * pa_sink_move_all_fail(). A new queue is allocated if 'q' is NULL. */
pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
    pa_sink_input *i, *n;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (!q)
        q = pa_queue_new();

    /* Fetch the successor 'n' before starting the move, because a
     * successful move removes 'i' from s->inputs and would invalidate
     * plain iteration. */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
        n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));

        /* Queued entries keep this reference until _finish()/_fail(). */
        pa_sink_input_ref(i);

        if (pa_sink_input_start_move(i) >= 0)
            pa_queue_push(q, i);
        else
            pa_sink_input_unref(i);
    }

    return q;
}
668
669 /* Called from main context */
670 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
671 pa_sink_input *i;
672
673 pa_sink_assert_ref(s);
674 pa_assert_ctl_context();
675 pa_assert(PA_SINK_IS_LINKED(s->state));
676 pa_assert(q);
677
678 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
679 if (pa_sink_input_finish_move(i, s, save) < 0)
680 pa_sink_input_fail_move(i);
681
682 pa_sink_input_unref(i);
683 }
684
685 pa_queue_free(q, NULL, NULL);
686 }
687
688 /* Called from main context */
689 void pa_sink_move_all_fail(pa_queue *q) {
690 pa_sink_input *i;
691
692 pa_assert_ctl_context();
693 pa_assert(q);
694
695 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
696 pa_sink_input_fail_move(i);
697 pa_sink_input_unref(i);
698 }
699
700 pa_queue_free(q, NULL, NULL);
701 }
702
703 /* Called from IO thread context */
704 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
705 pa_sink_input *i;
706 void *state = NULL;
707
708 pa_sink_assert_ref(s);
709 pa_sink_assert_io_context(s);
710 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
711
712 /* If nobody requested this and this is actually no real rewind
713 * then we can short cut this. Please note that this means that
714 * not all rewind requests triggered upstream will always be
715 * translated in actual requests! */
716 if (!s->thread_info.rewind_requested && nbytes <= 0)
717 return;
718
719 s->thread_info.rewind_nbytes = 0;
720 s->thread_info.rewind_requested = FALSE;
721
722 if (s->thread_info.state == PA_SINK_SUSPENDED)
723 return;
724
725 if (nbytes > 0)
726 pa_log_debug("Processing rewind...");
727
728 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
729 pa_sink_input_assert_ref(i);
730 pa_sink_input_process_rewind(i, nbytes);
731 }
732
733 if (nbytes > 0)
734 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
735 pa_source_process_rewind(s->monitor_source, nbytes);
736 }
737
/* Called from IO thread context */
/* Peek one chunk from every connected input (up to 'maxinfo') into the
 * info[] array. On return, *length is shrunk to the shortest peeked chunk
 * so that all entries can be mixed over the same span. Returns the number
 * of entries actually filled; each filled entry holds a reference to its
 * input (released later in inputs_drop()). */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* A silent chunk contributes nothing to the mix, so skip it —
         * note that its length has still capped mixlength above. */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        /* Pin the input for the duration of the mix. */
        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
777
/* Called from IO thread context */
/* After mixing: advance every input by the amount consumed, feed per-
 * stream direct outputs, forward the mixed result to the monitor source,
 * and release all references the info[] array holds. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array */
        /* 'p' persists across outer iterations: if hashmap order matches
         * info[] order, each lookup hits on the first probe. */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            /* Direct outputs tap this input's own stream (volume applied)
             * rather than the full mix; an input that contributed nothing
             * this round yields silence. */
            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                if (m && m->chunk.memblock) {
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    /* Apply the stream volume on a private writable copy. */
                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release this input's entry in the mix array. */
        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    /* The fully mixed result feeds the monitor source as well. */
    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
874
/* Called from IO thread context */
/* Render up to 'length' bytes of mixed audio into a freshly referenced
 * memchunk in *result. length == 0 means "pick a reasonable default".
 * Fast paths: a suspended sink or an empty mix return the cached silence
 * block; a single input is passed through zero-copy when possible. */
void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        result->memblock = pa_memblock_ref(s->silence.memblock);
        result->index = s->silence.index;
        result->length = PA_MIN(s->silence.length, length);
        return;
    }

    pa_sink_ref(s);

    if (length <= 0)
        length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);

    /* Never render more than one mempool block can hold. */
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* Nothing to mix: hand out the cached silence block. */

        *result = s->silence;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

    } else if (n == 1) {
        /* Single input: reference its chunk directly and only touch the
         * samples if volume/mute actually require it. */
        pa_cvolume volume;

        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

        /* Effective volume = sink soft volume x stream volume. */
        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
            pa_memblock_unref(result->memblock);
            pa_silence_memchunk_get(&s->core->silence_cache,
                                    s->core->mempool,
                                    result,
                                    &s->sample_spec,
                                    result->length);
        } else if (!pa_cvolume_is_norm(&volume)) {
            pa_memchunk_make_writable(result, 0);
            pa_volume_memchunk(result, &s->sample_spec, &volume);
        }
    } else {
        /* Several inputs: mix into a newly allocated block. */
        void *ptr;
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);
        result->length = pa_mix(info, n,
                                ptr, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);
        pa_memblock_release(result->memblock);

        result->index = 0;
    }

    /* Advance all inputs and release the references info[] holds. */
    inputs_drop(s, info, n, result);

    pa_sink_unref(s);
}
959
/* Called from IO thread context */
/* Like pa_sink_render(), but mixes into the caller-provided 'target'
 * chunk in place. May shorten target->length if less data was available
 * (capped by the mempool's maximum block size). */
void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    unsigned n;
    size_t length, block_size_max;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
        return;
    }

    pa_sink_ref(s);

    length = target->length;
    block_size_max = pa_mempool_block_size_max(s->core->mempool);
    if (length > block_size_max)
        length = pa_frame_align(block_size_max, &s->sample_spec);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* Nothing to mix: silence the target. */
        if (target->length > length)
            target->length = length;

        pa_silence_memchunk(target, &s->sample_spec);
    } else if (n == 1) {
        pa_cvolume volume;

        if (target->length > length)
            target->length = length;

        /* Effective volume = sink soft volume x stream volume. */
        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
            pa_silence_memchunk(target, &s->sample_spec);
        else {
            pa_memchunk vchunk;

            /* Copy the single input's data into the target, scaling on a
             * private writable copy first if the volume is not neutral. */
            vchunk = info[0].chunk;
            pa_memblock_ref(vchunk.memblock);

            if (vchunk.length > length)
                vchunk.length = length;

            if (!pa_cvolume_is_norm(&volume)) {
                pa_memchunk_make_writable(&vchunk, 0);
                pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
            }

            pa_memchunk_memcpy(target, &vchunk);
            pa_memblock_unref(vchunk.memblock);
        }

    } else {
        /* Several inputs: mix straight into the target's memory. */
        void *ptr;

        ptr = pa_memblock_acquire(target->memblock);

        target->length = pa_mix(info, n,
                                (uint8_t*) ptr + target->index, length,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(target->memblock);
    }

    inputs_drop(s, info, n, target);

    pa_sink_unref(s);
}
1044
/* Called from IO thread context */
/* Fill the whole 'target' chunk with rendered audio. Because
 * pa_sink_render_into() may deliver less than requested (it caps at the
 * mempool's maximum block size), this loops until every byte is filled. */
void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
    pa_memchunk chunk;
    size_t l, d;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    if (s->thread_info.state == PA_SINK_SUSPENDED) {
        pa_silence_memchunk(target, &s->sample_spec);
        return;
    }

    pa_sink_ref(s);

    /* 'd' bytes done so far, 'l' bytes left to fill. */
    l = target->length;
    d = 0;
    while (l > 0) {
        chunk = *target;
        chunk.index += d;
        chunk.length -= d;

        pa_sink_render_into(s, &chunk);

        /* chunk.length now reflects how much was actually rendered. */
        d += chunk.length;
        l -= chunk.length;
    }

    pa_sink_unref(s);
}
1083
/* Called from IO thread context */
/* Render exactly 'length' bytes into *result: start with a regular
 * pa_sink_render() and, if it returned short, make the chunk writable at
 * full size and fill the remainder with pa_sink_render_into_full(). */
void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(length > 0);
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    pa_sink_ref(s);

    pa_sink_render(s, length, result);

    if (result->length < length) {
        pa_memchunk chunk;

        /* Grow the chunk to the full requested size (copying if the
         * block is shared) and render into the tail. */
        pa_memchunk_make_writable(result, length);

        chunk.memblock = result->memblock;
        chunk.index = result->index + result->length;
        chunk.length = length - result->length;

        pa_sink_render_into_full(s, &chunk);

        result->length = length;
    }

    pa_sink_unref(s);
}
1116
1117 /* Called from main thread */
1118 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1119 pa_usec_t usec = 0;
1120
1121 pa_sink_assert_ref(s);
1122 pa_assert_ctl_context();
1123 pa_assert(PA_SINK_IS_LINKED(s->state));
1124
1125 /* The returned value is supposed to be in the time domain of the sound card! */
1126
1127 if (s->state == PA_SINK_SUSPENDED)
1128 return 0;
1129
1130 if (!(s->flags & PA_SINK_LATENCY))
1131 return 0;
1132
1133 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1134
1135 return usec;
1136 }
1137
1138 /* Called from IO thread */
1139 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1140 pa_usec_t usec = 0;
1141 pa_msgobject *o;
1142
1143 pa_sink_assert_ref(s);
1144 pa_sink_assert_io_context(s);
1145 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1146
1147 /* The returned value is supposed to be in the time domain of the sound card! */
1148
1149 if (s->thread_info.state == PA_SINK_SUSPENDED)
1150 return 0;
1151
1152 if (!(s->flags & PA_SINK_LATENCY))
1153 return 0;
1154
1155 o = PA_MSGOBJECT(s);
1156
1157 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1158
1159 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1160 return -1;
1161
1162 return usec;
1163 }
1164
1165 static pa_cvolume* cvolume_remap_minimal_impact(
1166 pa_cvolume *v,
1167 const pa_cvolume *template,
1168 const pa_channel_map *from,
1169 const pa_channel_map *to) {
1170
1171 pa_cvolume t;
1172
1173 pa_assert(v);
1174 pa_assert(template);
1175 pa_assert(from);
1176 pa_assert(to);
1177
1178 pa_return_val_if_fail(pa_cvolume_compatible_with_channel_map(v, from), NULL);
1179 pa_return_val_if_fail(pa_cvolume_compatible_with_channel_map(template, to), NULL);
1180
1181 /* Much like pa_cvolume_remap(), but tries to minimize impact when
1182 * mapping from sink input to sink volumes:
1183 *
1184 * If template is a possible remapping from v it is used instead
1185 * of remapping anew.
1186 *
1187 * If the channel maps don't match we set an all-channel volume on
1188 * the sink to ensure that changing a volume on one stream has no
1189 * effect that cannot be compensated for in another stream that
1190 * does not have the same channel map as the sink. */
1191
1192 if (pa_channel_map_equal(from, to))
1193 return v;
1194
1195 t = *template;
1196 if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
1197 *v = *template;
1198 return v;
1199 }
1200
1201 pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
1202 return v;
1203 }
1204
/* Called from main context */
/* Recomputes each sink input's reference_ratio from its own volume and
 * the sink's reference volume. Only meaningful in flat volume mode
 * (asserted below). */
static void compute_reference_ratios(pa_sink *s) {
    uint32_t idx;
    pa_sink_input *i;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        /*
         * Calculates the reference volume from the sink's reference
         * volume. This basically calculates:
         *
         * i->reference_ratio = i->volume / s->reference_volume
         */

        /* Map the sink's reference volume into the stream's channel map
         * so the per-channel division below lines up */
        remapped = s->reference_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->reference_ratio.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            /* We don't update when the sink volume is 0 anyway */
            if (remapped.values[c] <= PA_VOLUME_MUTED)
                continue;

            /* Don't update the reference ratio unless necessary: if the
             * stored ratio still reproduces i->volume exactly when
             * multiplied back, keep it, since redoing the division
             * would needlessly lose precision */
            if (pa_sw_volume_multiply(
                        i->reference_ratio.values[c],
                        remapped.values[c]) == i->volume.values[c])
                continue;

            i->reference_ratio.values[c] = pa_sw_volume_divide(
                    i->volume.values[c],
                    remapped.values[c]);
        }
    }
}
1249
/* Called from main context */
/* Recomputes each sink input's real_ratio and soft_volume from the
 * sink's real (hardware) volume. Only meaningful in flat volume mode
 * (asserted below). */
static void compute_real_ratios(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        /* Map the sink's real volume into the stream's channel map so
         * the per-channel arithmetic below lines up */
        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->real_ratio.channels = i->sample_spec.channels;
        i->soft_volume.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave i->real_ratio untouched */
                i->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary: only redo the
             * (lossy) division if the stored ratio no longer reproduces
             * i->volume when multiplied back */
            if (pa_sw_volume_multiply(
                        i->real_ratio.values[c],
                        remapped.values[c]) != i->volume.values[c])

                i->real_ratio.values[c] = pa_sw_volume_divide(
                        i->volume.values[c],
                        remapped.values[c]);

            i->soft_volume.values[c] = pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    i->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
1303
/* Called from main thread */
/* Derives the sink's real volume as the per-channel maximum of all
 * connected stream volumes, then refreshes the streams' real ratios and
 * soft volumes to match. Flat volume mode only (asserted below). */
static void compute_real_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (pa_idxset_isempty(s->inputs)) {
        /* In the special case that we have no sink input we leave the
         * volume unmodified. */
        s->real_volume = s->reference_volume;
        return;
    }

    /* Start from silence and fold every stream's volume in */
    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all inputs
     * connected to this sink. Note that s->real_volume doubles as the
     * remapping template here and is updated as we go, so iteration
     * order matters. */
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume remapped;

        remapped = i->volume;
        cvolume_remap_minimal_impact(&remapped, &s->real_volume, &i->channel_map, &s->channel_map);
        pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
    }

    /* Then, let's update the real ratios/soft volumes of all inputs
     * connected to this sink */
    compute_real_ratios(s);
}
1340
1341 /* Called from main thread */
1342 static void propagate_reference_volume(pa_sink *s) {
1343 pa_sink_input *i;
1344 uint32_t idx;
1345
1346 pa_sink_assert_ref(s);
1347 pa_assert_ctl_context();
1348 pa_assert(PA_SINK_IS_LINKED(s->state));
1349 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1350
1351 /* This is called whenever the sink volume changes that is not
1352 * caused by a sink input volume change. We need to fix up the
1353 * sink input volumes accordingly */
1354
1355 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1356 pa_cvolume old_volume, remapped;
1357
1358 old_volume = i->volume;
1359
1360 /* This basically calculates:
1361 *
1362 * i->volume := s->reference_volume * i->reference_ratio */
1363
1364 remapped = s->reference_volume;
1365 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1366 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1367
1368 /* The volume changed, let's tell people so */
1369 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1370
1371 if (i->volume_changed)
1372 i->volume_changed(i);
1373
1374 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1375 }
1376 }
1377 }
1378
/* Called from main thread */
/* Sets the sink volume.
 *
 * volume:   new reference volume, or NULL (flat volume mode only) to
 *           re-derive the sink volumes from the stream volumes
 * send_msg: if TRUE, push the resulting soft volume to the IO thread
 * save:     if TRUE, mark the volume as worth persisting
 */
void pa_sink_set_volume(
        pa_sink *s,
        const pa_cvolume *volume,
        pa_bool_t send_msg,
        pa_bool_t save) {

    pa_cvolume old_reference_volume;
    pa_bool_t reference_changed;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));
    pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));

    /* As a special exception we accept mono volumes on all sinks --
     * even on those with more complex channel maps */

    /* If volume is NULL we synchronize the sink's real and reference
     * volumes with the stream volumes. If it is not NULL we update
     * the reference_volume with it. */

    old_reference_volume = s->reference_volume;

    if (volume) {

        /* A mono volume is scaled onto all channels; a matching one is
         * taken as-is */
        if (pa_cvolume_compatible(volume, &s->sample_spec))
            s->reference_volume = *volume;
        else
            pa_cvolume_scale(&s->reference_volume, pa_cvolume_max(volume));

        if (s->flags & PA_SINK_FLAT_VOLUME) {
            /* OK, propagate this volume change back to the inputs */
            propagate_reference_volume(s);

            /* And now recalculate the real volume */
            compute_real_volume(s);
        } else
            s->real_volume = s->reference_volume;

    } else {
        pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

        /* Ok, let's determine the new real volume */
        compute_real_volume(s);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);

        /* We need to fix the reference ratios of all streams now that
         * we changed the reference volume */
        compute_reference_ratios(s);
    }

    reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
    /* Keep an earlier save request alive if nothing changed, otherwise
     * honour the caller's wish */
    s->save_volume = (!reference_changed && s->save_volume) || save;

    if (s->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to s->soft_volume */

        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
        s->set_volume(s);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the virtual volume */
        s->soft_volume = s->real_volume;

    /* This tells the sink that soft and/or virtual volume changed */
    if (send_msg)
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);

    if (reference_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1458
1459 /* Called from main thread. Only to be called by sink implementor */
1460 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1461 pa_sink_assert_ref(s);
1462 pa_assert_ctl_context();
1463
1464 if (!volume)
1465 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1466 else
1467 s->soft_volume = *volume;
1468
1469 if (PA_SINK_IS_LINKED(s->state))
1470 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1471 else
1472 s->thread_info.soft_volume = s->soft_volume;
1473 }
1474
/* Called from main thread */
/* Propagates an externally caused hardware volume change: makes the new
 * real volume the reference volume and rebuilds the stream volumes from
 * their (unchanged) ratios. No-op if the real volume did not actually
 * change. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    pa_sink_input *i;
    uint32_t idx;
    pa_cvolume old_reference_volume;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (pa_cvolume_equal(old_real_volume, &s->real_volume))
        return;

    old_reference_volume = s->reference_volume;

    /* 1. Make the real volume the reference volume */
    s->reference_volume = s->real_volume;

    if (s->flags & PA_SINK_FLAT_VOLUME) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume old_volume, remapped;

            old_volume = i->volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            i->reference_ratio = i->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            remapped = s->reference_volume;
            pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &i->volume)) {

                if (i->volume_changed)
                    i->volume_changed(i);

                pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
            }
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    s->save_volume = TRUE;

    if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1539
1540 /* Called from main thread */
1541 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1542 pa_sink_assert_ref(s);
1543 pa_assert_ctl_context();
1544 pa_assert(PA_SINK_IS_LINKED(s->state));
1545
1546 if (s->refresh_volume || force_refresh) {
1547 struct pa_cvolume old_real_volume;
1548
1549 old_real_volume = s->real_volume;
1550
1551 if (s->get_volume)
1552 s->get_volume(s);
1553
1554 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1555
1556 propagate_real_volume(s, &old_real_volume);
1557 }
1558
1559 return &s->reference_volume;
1560 }
1561
1562 /* Called from main thread */
1563 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1564 pa_cvolume old_real_volume;
1565
1566 pa_sink_assert_ref(s);
1567 pa_assert_ctl_context();
1568 pa_assert(PA_SINK_IS_LINKED(s->state));
1569
1570 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1571
1572 old_real_volume = s->real_volume;
1573 s->real_volume = *new_real_volume;
1574
1575 propagate_real_volume(s, &old_real_volume);
1576 }
1577
1578 /* Called from main thread */
1579 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1580 pa_bool_t old_muted;
1581
1582 pa_sink_assert_ref(s);
1583 pa_assert_ctl_context();
1584 pa_assert(PA_SINK_IS_LINKED(s->state));
1585
1586 old_muted = s->muted;
1587 s->muted = mute;
1588 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1589
1590 if (s->set_mute)
1591 s->set_mute(s);
1592
1593 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1594
1595 if (old_muted != s->muted)
1596 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1597 }
1598
1599 /* Called from main thread */
1600 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1601
1602 pa_sink_assert_ref(s);
1603 pa_assert_ctl_context();
1604 pa_assert(PA_SINK_IS_LINKED(s->state));
1605
1606 if (s->refresh_muted || force_refresh) {
1607 pa_bool_t old_muted = s->muted;
1608
1609 if (s->get_mute)
1610 s->get_mute(s);
1611
1612 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1613
1614 if (old_muted != s->muted) {
1615 s->save_muted = TRUE;
1616
1617 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1618
1619 /* Make sure the soft mute status stays in sync */
1620 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1621 }
1622 }
1623
1624 return s->muted;
1625 }
1626
1627 /* Called from main thread */
1628 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1629 pa_sink_assert_ref(s);
1630 pa_assert_ctl_context();
1631 pa_assert(PA_SINK_IS_LINKED(s->state));
1632
1633 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1634
1635 if (s->muted == new_muted)
1636 return;
1637
1638 s->muted = new_muted;
1639 s->save_muted = TRUE;
1640
1641 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1642 }
1643
1644 /* Called from main thread */
1645 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1646 pa_sink_assert_ref(s);
1647 pa_assert_ctl_context();
1648
1649 if (p)
1650 pa_proplist_update(s->proplist, mode, p);
1651
1652 if (PA_SINK_IS_LINKED(s->state)) {
1653 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1654 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1655 }
1656
1657 return TRUE;
1658 }
1659
1660 /* Called from main thread */
1661 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1662 void pa_sink_set_description(pa_sink *s, const char *description) {
1663 const char *old;
1664 pa_sink_assert_ref(s);
1665 pa_assert_ctl_context();
1666
1667 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1668 return;
1669
1670 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1671
1672 if (old && description && pa_streq(old, description))
1673 return;
1674
1675 if (description)
1676 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1677 else
1678 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1679
1680 if (s->monitor_source) {
1681 char *n;
1682
1683 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1684 pa_source_set_description(s->monitor_source, n);
1685 pa_xfree(n);
1686 }
1687
1688 if (PA_SINK_IS_LINKED(s->state)) {
1689 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1690 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1691 }
1692 }
1693
1694 /* Called from main thread */
1695 unsigned pa_sink_linked_by(pa_sink *s) {
1696 unsigned ret;
1697
1698 pa_sink_assert_ref(s);
1699 pa_assert_ctl_context();
1700 pa_assert(PA_SINK_IS_LINKED(s->state));
1701
1702 ret = pa_idxset_size(s->inputs);
1703
1704 /* We add in the number of streams connected to us here. Please
1705 * note the asymmmetry to pa_sink_used_by()! */
1706
1707 if (s->monitor_source)
1708 ret += pa_source_linked_by(s->monitor_source);
1709
1710 return ret;
1711 }
1712
1713 /* Called from main thread */
1714 unsigned pa_sink_used_by(pa_sink *s) {
1715 unsigned ret;
1716
1717 pa_sink_assert_ref(s);
1718 pa_assert_ctl_context();
1719 pa_assert(PA_SINK_IS_LINKED(s->state));
1720
1721 ret = pa_idxset_size(s->inputs);
1722 pa_assert(ret >= s->n_corked);
1723
1724 /* Streams connected to our monitor source do not matter for
1725 * pa_sink_used_by()!.*/
1726
1727 return ret - s->n_corked;
1728 }
1729
1730 /* Called from main thread */
1731 unsigned pa_sink_check_suspend(pa_sink *s) {
1732 unsigned ret;
1733 pa_sink_input *i;
1734 uint32_t idx;
1735
1736 pa_sink_assert_ref(s);
1737 pa_assert_ctl_context();
1738
1739 if (!PA_SINK_IS_LINKED(s->state))
1740 return 0;
1741
1742 ret = 0;
1743
1744 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1745 pa_sink_input_state_t st;
1746
1747 st = pa_sink_input_get_state(i);
1748 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1749
1750 if (st == PA_SINK_INPUT_CORKED)
1751 continue;
1752
1753 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1754 continue;
1755
1756 ret ++;
1757 }
1758
1759 if (s->monitor_source)
1760 ret += pa_source_check_suspend(s->monitor_source);
1761
1762 return ret;
1763 }
1764
/* Called from the IO thread */
/* Copies each input's main-thread soft_volume into its thread_info copy
 * and requests a rewind so the new volume takes effect promptly. */
static void sync_input_volumes_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        /* A pending volume ramp stores the new value as the future
         * soft volume instead of applying it right away -- presumably
         * picked up by the sink-input ramping code; confirm there */
        if (pa_atomic_load(&i->before_ramping_v))
            i->thread_info.future_soft_volume = i->soft_volume;

        if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
            continue;

        if (!pa_atomic_load(&i->before_ramping_v))
            i->thread_info.soft_volume = i->soft_volume;
        pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
    }
}
1785
/* Called from IO thread, except when it is not */
/* Default message handler for sinks: implements the generic part of the
 * sink message protocol (stream add/remove/move, volume/mute sync,
 * state changes, latency queries). Sink implementations wrap this and
 * handle device-specific messages (notably GET_LATENCY) themselves;
 * anything unhandled here returns -1. */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Wire the stream into the sync chain, if it has one */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be
             * fixed up and then configured on the sink */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming. */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_PREPAPRE_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unlink the stream from the sync chain */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                /* Take back everything still queued for this stream so
                 * it can be replayed on the new sink */
                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                /* Skip what the new sink already has buffered so the
                 * stream continues without repeating audio */
                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (!(s->flags & PA_SINK_FLAT_VOLUME))
                return 0;

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        /* No-op here; sinks with hardware volume override this */
        case PA_SINK_MESSAGE_GET_VOLUME:
            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        /* No-op here; sinks with hardware mute override this */
        case PA_SINK_MESSAGE_GET_MUTE:
            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* TRUE exactly when crossing the suspended/opened boundary
             * in either direction */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            /* A suspended sink has no pending rewind */
            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            /* userdata points to a two-element array: {min, max} */
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            /* userdata points to a two-element array: {min, max} */
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        /* GET_LATENCY must be answered by the sink implementation;
         * falling out of the switch yields the -1 error return */
        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            ;
    }

    return -1;
}
2112
2113 /* Called from main thread */
2114 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2115 pa_sink *sink;
2116 uint32_t idx;
2117 int ret = 0;
2118
2119 pa_core_assert_ref(c);
2120 pa_assert_ctl_context();
2121 pa_assert(cause != 0);
2122
2123 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2124 int r;
2125
2126 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2127 ret = r;
2128 }
2129
2130 return ret;
2131 }
2132
/* Called from main thread */
/* Detaches the sink: dispatches PA_SINK_MESSAGE_DETACH to the IO thread
 * and blocks until it has been processed there. */
void pa_sink_detach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
2141
/* Called from main thread */
/* Attaches the sink: dispatches PA_SINK_MESSAGE_ATTACH to the IO thread
 * and blocks until it has been processed there. */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2150
/* Called from IO thread */
/* Invokes the detach() callback on every connected input that implements
 * it, then detaches the monitor source as well. */
void pa_sink_detach_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* detach() is optional per input */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->detach)
            i->detach(i);

    /* The monitor source follows the sink */
    if (s->monitor_source)
        pa_source_detach_within_thread(s->monitor_source);
}
2167
/* Called from IO thread */
/* Invokes the attach() callback on every connected input that implements
 * it, then attaches the monitor source as well. Counterpart of
 * pa_sink_detach_within_thread(). */
void pa_sink_attach_within_thread(pa_sink *s) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* attach() is optional per input */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->attach)
            i->attach(i);

    /* The monitor source follows the sink */
    if (s->monitor_source)
        pa_source_attach_within_thread(s->monitor_source);
}
2184
2185 /* Called from IO thread */
2186 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2187 pa_sink_assert_ref(s);
2188 pa_sink_assert_io_context(s);
2189 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2190
2191 if (s->thread_info.state == PA_SINK_SUSPENDED)
2192 return;
2193
2194 if (nbytes == (size_t) -1)
2195 nbytes = s->thread_info.max_rewind;
2196
2197 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2198
2199 if (s->thread_info.rewind_requested &&
2200 nbytes <= s->thread_info.rewind_nbytes)
2201 return;
2202
2203 s->thread_info.rewind_nbytes = nbytes;
2204 s->thread_info.rewind_requested = TRUE;
2205
2206 if (s->request_rewind)
2207 s->request_rewind(s);
2208 }
2209
/* Called from IO thread */
/* Returns the effective requested latency of the sink: the minimum over
 * all inputs' requested latencies and the monitor source's request,
 * clamped into [min_latency, max_latency]. (pa_usec_t) -1 means "nothing
 * requested". The result is cached until invalidated. */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1; /* -1 == no latency requested */
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Without dynamic latency, the fixed latency (clamped) is the answer */
    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    /* Serve from the cache if it is still valid */
    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Minimum over all inputs that actually requested a latency */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* The monitor source's request counts, too */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
2248
/* Called from main thread */
/* Main-thread accessor for the requested latency: asks the IO thread via
 * a synchronous message. Returns 0 while the sink is suspended. */
pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* No latency is requested from a suspended sink */
    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
    return usec;
}
2263
/* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Updates max_rewind and propagates the new value to all inputs (only
 * once linked) and to the monitor source. */
void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* No change, nothing to do */
    if (max_rewind == s->thread_info.max_rewind)
        return;

    s->thread_info.max_rewind = max_rewind;

    /* Tell every input about the new value */
    if (PA_SINK_IS_LINKED(s->thread_info.state))
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);

    /* The monitor source mirrors the sink's max_rewind */
    if (s->monitor_source)
        pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
}
2284
/* Called from main thread */
/* Sets max_rewind. Once linked, thread_info is owned by the IO thread,
 * so the update is routed through the message queue (value travels in
 * the offset argument); before linking it is applied directly. */
void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
    else
        pa_sink_set_max_rewind_within_thread(s, max_rewind);
}
2295
/* Called from IO as well as the main thread -- the latter only before the IO thread started up */
/* Updates max_request and propagates the new value to all inputs (only
 * once linked). */
void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* No change, nothing to do */
    if (max_request == s->thread_info.max_request)
        return;

    s->thread_info.max_request = max_request;

    /* Tell every input about the new value */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            pa_sink_input_update_max_request(i, s->thread_info.max_request);
    }
}
2315
/* Called from main thread */
/* Sets max_request. Once linked, the update goes through the message
 * queue (value travels in the offset argument); before linking it is
 * applied directly. Mirrors pa_sink_set_max_rewind(). */
void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
    else
        pa_sink_set_max_request_within_thread(s, max_request);
}
2326
/* Called from IO thread */
/* Invalidates the cached requested latency. For dynamic-latency sinks
 * the cache flag is cleared; for fixed-latency sinks there is no cache,
 * and if the caller only cares about dynamic latency ('dynamic' set)
 * nothing further needs to happen. Then the implementation and all
 * inputs are notified (only once linked). */
void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = FALSE;
    else if (dynamic)
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        /* Let the implementation know first... */
        if (s->update_requested_latency)
            s->update_requested_latency(s);

        /* ...then every input that registered a callback */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
2350
2351 /* Called from main thread */
2352 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2353 pa_sink_assert_ref(s);
2354 pa_assert_ctl_context();
2355
2356 /* min_latency == 0: no limit
2357 * min_latency anything else: specified limit
2358 *
2359 * Similar for max_latency */
2360
2361 if (min_latency < ABSOLUTE_MIN_LATENCY)
2362 min_latency = ABSOLUTE_MIN_LATENCY;
2363
2364 if (max_latency <= 0 ||
2365 max_latency > ABSOLUTE_MAX_LATENCY)
2366 max_latency = ABSOLUTE_MAX_LATENCY;
2367
2368 pa_assert(min_latency <= max_latency);
2369
2370 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2371 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2372 max_latency == ABSOLUTE_MAX_LATENCY) ||
2373 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2374
2375 if (PA_SINK_IS_LINKED(s->state)) {
2376 pa_usec_t r[2];
2377
2378 r[0] = min_latency;
2379 r[1] = max_latency;
2380
2381 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2382 } else
2383 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2384 }
2385
/* Called from main thread */
/* Reads the current latency range into *min_latency/*max_latency. Once
 * linked, the values are fetched from the IO thread via a synchronous
 * message; before that the thread_info fields can be read directly. */
void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(min_latency);
    pa_assert(max_latency);

    if (PA_SINK_IS_LINKED(s->state)) {
        pa_usec_t r[2] = { 0, 0 };

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);

        *min_latency = r[0];
        *max_latency = r[1];
    } else {
        *min_latency = s->thread_info.min_latency;
        *max_latency = s->thread_info.max_latency;
    }
}
2405
/* Called from IO thread */
/* Applies a new latency range: stores it, notifies interested inputs,
 * invalidates the cached requested latency (which depends on the range)
 * and forwards the range to the monitor source. Arguments must already
 * be within the absolute bounds (asserted). */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    /* No change, nothing to do */
    if (s->thread_info.min_latency == min_latency &&
        s->thread_info.max_latency == max_latency)
        return;

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    /* Notify all inputs that care about the sink's latency range */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    /* The cached requested latency is clamped to this range, so drop it */
    pa_sink_invalidate_requested_latency(s, FALSE);

    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
2440
/* Called from main thread */
/* Sets the fixed latency of a non-dynamic-latency sink, clamped into the
 * absolute bounds. Dynamic-latency sinks must pass 0 (asserted) and are
 * otherwise unaffected. The monitor source is kept in sync. */
void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* A fixed latency is meaningless with dynamic latency */
    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    if (latency < ABSOLUTE_MIN_LATENCY)
        latency = ABSOLUTE_MIN_LATENCY;

    if (latency > ABSOLUTE_MAX_LATENCY)
        latency = ABSOLUTE_MAX_LATENCY;

    /* Once linked, go through the message queue; the value travels in the
     * offset argument. Before linking, set the field directly. */
    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
    else
        s->thread_info.fixed_latency = latency;

    pa_source_set_fixed_latency(s->monitor_source, latency);
}
2464
/* Called from main thread */
/* Returns the fixed latency of the sink, or 0 for dynamic-latency sinks
 * (which have none). Once linked, the value is fetched from the IO
 * thread via a synchronous message. */
pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
    pa_usec_t latency;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    if (s->flags & PA_SINK_DYNAMIC_LATENCY)
        return 0;

    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
    else
        latency = s->thread_info.fixed_latency;

    return latency;
}
2482
/* Called from IO thread */
/* Applies a new fixed latency: stores it, notifies interested inputs,
 * invalidates the cached requested latency (which reflects the fixed
 * latency for non-dynamic sinks) and forwards the value to the monitor
 * source. Dynamic-latency sinks must pass 0 (asserted). */
void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* A fixed latency is meaningless with dynamic latency */
    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(latency <= ABSOLUTE_MAX_LATENCY);

    /* No change, nothing to do */
    if (s->thread_info.fixed_latency == latency)
        return;

    s->thread_info.fixed_latency = latency;

    /* Notify all inputs that care about the sink's fixed latency */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_fixed_latency)
                i->update_sink_fixed_latency(i);
    }

    pa_sink_invalidate_requested_latency(s, FALSE);

    pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
}
2514
2515 /* Called from main context */
2516 size_t pa_sink_get_max_rewind(pa_sink *s) {
2517 size_t r;
2518 pa_sink_assert_ref(s);
2519 pa_assert_ctl_context();
2520
2521 if (!PA_SINK_IS_LINKED(s->state))
2522 return s->thread_info.max_rewind;
2523
2524 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2525
2526 return r;
2527 }
2528
2529 /* Called from main context */
2530 size_t pa_sink_get_max_request(pa_sink *s) {
2531 size_t r;
2532 pa_sink_assert_ref(s);
2533 pa_assert_ctl_context();
2534
2535 if (!PA_SINK_IS_LINKED(s->state))
2536 return s->thread_info.max_request;
2537
2538 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2539
2540 return r;
2541 }
2542
2543 /* Called from main context */
2544 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2545 pa_device_port *port;
2546
2547 pa_sink_assert_ref(s);
2548 pa_assert_ctl_context();
2549
2550 if (!s->set_port) {
2551 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2552 return -PA_ERR_NOTIMPLEMENTED;
2553 }
2554
2555 if (!s->ports)
2556 return -PA_ERR_NOENTITY;
2557
2558 if (!(port = pa_hashmap_get(s->ports, name)))
2559 return -PA_ERR_NOENTITY;
2560
2561 if (s->active_port == port) {
2562 s->save_port = s->save_port || save;
2563 return 0;
2564 }
2565
2566 if ((s->set_port(s, port)) < 0)
2567 return -PA_ERR_NOENTITY;
2568
2569 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2570
2571 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2572
2573 s->active_port = port;
2574 s->save_port = save;
2575
2576 return 0;
2577 }
2578
2579 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2580 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2581
2582 pa_assert(p);
2583
2584 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2585 return TRUE;
2586
2587 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2588
2589 if (pa_streq(ff, "microphone"))
2590 t = "audio-input-microphone";
2591 else if (pa_streq(ff, "webcam"))
2592 t = "camera-web";
2593 else if (pa_streq(ff, "computer"))
2594 t = "computer";
2595 else if (pa_streq(ff, "handset"))
2596 t = "phone";
2597 else if (pa_streq(ff, "portable"))
2598 t = "multimedia-player";
2599 else if (pa_streq(ff, "tv"))
2600 t = "video-display";
2601
2602 /*
2603 * The following icons are not part of the icon naming spec,
2604 * because Rodney Dawes sucks as the maintainer of that spec.
2605 *
2606 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2607 */
2608 else if (pa_streq(ff, "headset"))
2609 t = "audio-headset";
2610 else if (pa_streq(ff, "headphone"))
2611 t = "audio-headphones";
2612 else if (pa_streq(ff, "speaker"))
2613 t = "audio-speakers";
2614 else if (pa_streq(ff, "hands-free"))
2615 t = "audio-handsfree";
2616 }
2617
2618 if (!t)
2619 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2620 if (pa_streq(c, "modem"))
2621 t = "modem";
2622
2623 if (!t) {
2624 if (is_sink)
2625 t = "audio-card";
2626 else
2627 t = "audio-input-microphone";
2628 }
2629
2630 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2631 if (strstr(profile, "analog"))
2632 s = "-analog";
2633 else if (strstr(profile, "iec958"))
2634 s = "-iec958";
2635 else if (strstr(profile, "hdmi"))
2636 s = "-hdmi";
2637 }
2638
2639 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2640
2641 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2642
2643 return TRUE;
2644 }
2645
2646 pa_bool_t pa_device_init_description(pa_proplist *p) {
2647 const char *s, *d = NULL, *k;
2648 pa_assert(p);
2649
2650 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2651 return TRUE;
2652
2653 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2654 if (pa_streq(s, "internal"))
2655 d = _("Internal Audio");
2656
2657 if (!d)
2658 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2659 if (pa_streq(s, "modem"))
2660 d = _("Modem");
2661
2662 if (!d)
2663 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2664
2665 if (!d)
2666 return FALSE;
2667
2668 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2669
2670 if (d && k)
2671 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2672 else if (d)
2673 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2674
2675 return TRUE;
2676 }
2677
2678 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2679 const char *s;
2680 pa_assert(p);
2681
2682 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2683 return TRUE;
2684
2685 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2686 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2687 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2688 return TRUE;
2689 }
2690
2691 return FALSE;
2692 }
2693
2694 unsigned pa_device_init_priority(pa_proplist *p) {
2695 const char *s;
2696 unsigned priority = 0;
2697
2698 pa_assert(p);
2699
2700 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
2701
2702 if (pa_streq(s, "sound"))
2703 priority += 9000;
2704 else if (!pa_streq(s, "modem"))
2705 priority += 1000;
2706 }
2707
2708 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2709
2710 if (pa_streq(s, "internal"))
2711 priority += 900;
2712 else if (pa_streq(s, "speaker"))
2713 priority += 500;
2714 else if (pa_streq(s, "headphone"))
2715 priority += 400;
2716 }
2717
2718 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
2719
2720 if (pa_streq(s, "pci"))
2721 priority += 50;
2722 else if (pa_streq(s, "usb"))
2723 priority += 40;
2724 else if (pa_streq(s, "bluetooth"))
2725 priority += 30;
2726 }
2727
2728 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2729
2730 if (pa_startswith(s, "analog-"))
2731 priority += 9;
2732 else if (pa_startswith(s, "iec958-"))
2733 priority += 8;
2734 }
2735
2736 return priority;
2737 }