/* Origin: pulseaudio src/pulsecore/sink.c
 * (commit: "core: add priority field to pa_sink/pa_source") */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
/* Mixing and latency limits. */
#define MAX_MIX_CHANNELS 32                          /* max inputs mixed in a single pass */
#define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)             /* default render size in bytes when caller passes 0 */
#define ABSOLUTE_MIN_LATENCY (500)                   /* hard latency floor (usec — TODO confirm unit) */
#define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)    /* hard latency ceiling: 10 s */
#define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC) /* used when the sink lacks PA_SINK_DYNAMIC_LATENCY */

PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);

/* Destructor invoked by the refcounting machinery; defined below. */
static void sink_free(pa_object *s);
/* Zero-initialize 'data' and attach a fresh, empty proplist.
 * Returns its argument so calls can be chained. */
pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
    pa_assert(data);

    pa_zero(*data);
    data->proplist = pa_proplist_new();

    return data;
}
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
149
/* Called from main context */
/* Detach all implementor-provided callbacks, returning the sink to a
 * pure software sink. Used at creation time and on unlink, so a dying
 * backend can never be called back. */
static void reset_callbacks(pa_sink *s) {
    pa_assert(s);

    s->set_state = NULL;
    s->get_volume = NULL;
    s->set_volume = NULL;
    s->get_mute = NULL;
    s->set_mute = NULL;
    s->request_rewind = NULL;
    s->update_requested_latency = NULL;
    s->set_port = NULL;
}
163
/* Called from main context */
/* Construct a sink from 'data': register its name, fire the NEW and
 * FIXATE hooks, fill in defaults for unset fields, and create the
 * matching "<name>.monitor" source. Returns NULL on failure; note the
 * FIXME below — the pa_return_null_if_fail paths leak 's'. */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* Reserve the sink name first; a clash or invalid name aborts
     * construction before anything else is set up. */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    /* Let modules veto or adjust the new sink's data. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Fill in defaults for whatever the caller left unset. */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    if (!data->volume_is_set)
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit properties from the owning card, then derive standard
     * description/icon/role properties. */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to tweak the now-complete data. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->priority = 0;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* No (valid) port requested: fall back to the highest-priority one. */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* Shadow copies of state that the IO thread reads without locking. */
    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    /* FIXME: This should probably be moved to pa_sink_put() */
    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Every sink gets a monitor source mirroring its output, sharing
     * the sink's spec, map and latency-related flags. */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
358
/* Called from main context */
/* Move the sink to 'state'. The implementor callback and the IO thread
 * are both informed; if the IO thread refuses, the implementor's change
 * is rolled back. On success, state-change events are fired and, when
 * crossing the suspend boundary, all inputs are notified (or killed if
 * they asked for that). Returns 0 on success, negative on failure. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* Are we crossing the suspended/opened boundary in either direction? */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    /* Give the implementor a chance to refuse the transition... */
    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    /* ...then synchronously mirror it into the IO thread; on failure
     * roll the implementor back to where it was. */
    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the apropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        /* Keep the monitor source's suspend state in lockstep. */
        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
416
/* Called from main context */
/* Finish construction and publish the sink: validate the fields the
 * implementor had to fill in between _new() and _put(), derive the
 * volume-related flags, move the sink to IDLE, publish the monitor
 * source, and fire the NEW subscription event and PUT hook. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow volume related flags to be set
     * between _new() and _put(). */

    /* Without hardware volume control we scale in software, where dB
     * volumes are always available. */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
        s->flags |= PA_SINK_DECIBEL_VOLUME;

    if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
        s->flags |= PA_SINK_FLAT_VOLUME;

    /* We assume that if the sink implementor changed the default
     * volume he did so in real_volume, because that is the usual
     * place where he is supposed to place his changes. */
    s->reference_volume = s->real_volume;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;

    /* Sanity-check flag/field combinations and that the monitor source
     * agrees with our latency setup. */
    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    /* Announce the new sink to clients and modules. */
    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
463
/* Called from main context */
/* Detach the sink from the core: unregister its name, remove it from
 * the core/card sets, kill all inputs, enter the UNLINKED state and
 * unlink the monitor source. Does not free the sink. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Kill every remaining input; the assert verifies each kill really
     * removed the input from the set (otherwise we'd spin here). */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
513
/* Called from main context */
/* Destructor run when the last reference is dropped: unlink if still
 * linked, then release everything pa_sink_new() allocated (monitor
 * source, input sets, silence block, strings, proplist, ports). */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);
    pa_sink_input *i;

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    pa_idxset_free(s->inputs, NULL, NULL);

    /* Drop the references the IO-thread hashmap still holds. */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
560
/* Called from main context, and not while the IO thread is active, please */
/* Attach the message queue used to talk to the IO thread; the monitor
 * source shares the same queue. */
void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    s->asyncmsgq = q;

    if (s->monitor_source)
        pa_source_set_asyncmsgq(s->monitor_source, q);
}
571
572 /* Called from main context, and not while the IO thread is active, please */
573 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
574 pa_sink_assert_ref(s);
575 pa_assert_ctl_context();
576
577 if (mask == 0)
578 return;
579
580 /* For now, allow only a minimal set of flags to be changed. */
581 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
582
583 s->flags = (s->flags & ~mask) | (value & mask);
584
585 pa_source_update_flags(s->monitor_source,
586 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
587 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
588 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
589 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
590 }
591
/* Called from IO context, or before _put() from main context */
/* Attach the rtpoll object the IO thread runs on; the monitor source
 * shares the same rtpoll. */
void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    s->thread_info.rtpoll = p;

    if (s->monitor_source)
        pa_source_set_rtpoll(s->monitor_source, p);
}
602
603 /* Called from main context */
604 int pa_sink_update_status(pa_sink*s) {
605 pa_sink_assert_ref(s);
606 pa_assert_ctl_context();
607 pa_assert(PA_SINK_IS_LINKED(s->state));
608
609 if (s->state == PA_SINK_SUSPENDED)
610 return 0;
611
612 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
613 }
614
/* Called from main context */
/* Set or clear one suspend-cause bit on the sink (and its monitor
 * source) and transition to/out of SUSPENDED accordingly: the sink is
 * suspended exactly while at least one cause bit is set. */
int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (suspend) {
        s->suspend_cause |= cause;
        s->monitor_source->suspend_cause |= cause;
    } else {
        s->suspend_cause &= ~cause;
        s->monitor_source->suspend_cause &= ~cause;
    }

    /* Already in the state the cause bits demand? Then nothing to do.
     * (LHS: currently suspended; RHS: should be suspended.) */
    if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return sink_set_state(s, PA_SINK_SUSPENDED);
    else
        return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
640
/* Called from main context */
/* Begin moving every input away from this sink: each input is ref'ed,
 * start_move'd and pushed onto 'q' (allocated here if NULL) for a later
 * pa_sink_move_all_finish() or pa_sink_move_all_fail(). Inputs whose
 * move cannot be started are unref'ed and skipped. */
pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
    pa_sink_input *i, *n;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (!q)
        q = pa_queue_new();

    /* 'n' is fetched before the move because a successful start_move
     * presumably detaches 'i' from s->inputs, invalidating 'idx'. */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
        n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));

        pa_sink_input_ref(i);

        if (pa_sink_input_start_move(i) >= 0)
            pa_queue_push(q, i);
        else
            pa_sink_input_unref(i);
    }

    return q;
}
666
667 /* Called from main context */
668 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
669 pa_sink_input *i;
670
671 pa_sink_assert_ref(s);
672 pa_assert_ctl_context();
673 pa_assert(PA_SINK_IS_LINKED(s->state));
674 pa_assert(q);
675
676 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
677 if (pa_sink_input_finish_move(i, s, save) < 0)
678 pa_sink_input_fail_move(i);
679
680 pa_sink_input_unref(i);
681 }
682
683 pa_queue_free(q, NULL, NULL);
684 }
685
686 /* Called from main context */
687 void pa_sink_move_all_fail(pa_queue *q) {
688 pa_sink_input *i;
689
690 pa_assert_ctl_context();
691 pa_assert(q);
692
693 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
694 pa_sink_input_fail_move(i);
695 pa_sink_input_unref(i);
696 }
697
698 pa_queue_free(q, NULL, NULL);
699 }
700
/* Called from IO thread context */
/* Execute a rewind of 'nbytes' bytes: clear the pending request,
 * propagate the rewind to every input and then to the monitor source.
 * nbytes == 0 with a pending request just clears the request. */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* If nobody requested this and this is actually no real rewind
     * then we can short cut this. Please note that this means that
     * not all rewind requests triggered upstream will always be
     * translated in actual requests! */
    if (!s->thread_info.rewind_requested && nbytes <= 0)
        return;

    /* Consume the pending request before doing any work. */
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return;

    if (nbytes > 0)
        pa_log_debug("Processing rewind...");

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_process_rewind(i, nbytes);
    }

    if (nbytes > 0)
        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
            pa_source_process_rewind(s->monitor_source, nbytes);
}
735
/* Called from IO thread context */
/* Collect up to 'maxinfo' peeked chunks from the connected inputs into
 * 'info' for mixing. On entry *length is the wanted byte count (0 =
 * implementation's choice); on exit it is lowered to the shortest
 * chunk peeked. Each filled entry holds a reference to its input (in
 * ->userdata) and to the chunk's memblock; both are released later by
 * inputs_drop(). Returns the number of entries filled in. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Pure silence contributes nothing to the mix; skip it (it is
         * still advanced later because inputs_drop() drops from every
         * input, matched entry or not). */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
775
/* Called from IO thread context */
/* Post-mix bookkeeping for the entries fill_mix_info() produced:
 * advance every input by result->length bytes, feed each input's
 * direct outputs on the monitor source (with the input's own
 * volume-adjusted data, or silence if it contributed none), release
 * the input/memblock references held in 'info', and finally post
 * 'result' to the monitor source. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state = NULL;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array */
        /* (circular scan starting at 'p', which is left just past the
         * previous hit, so an unchanged order matches immediately) */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                /* Direct outputs get this input's own data with its
                 * volume applied — or silence if it had no entry. */
                if (m && m->chunk.memblock) {
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release the references fill_mix_info() took for this entry. */
        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
872
873 /* Called from IO thread context */
874 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
875 pa_mix_info info[MAX_MIX_CHANNELS];
876 unsigned n;
877 size_t block_size_max;
878
879 pa_sink_assert_ref(s);
880 pa_sink_assert_io_context(s);
881 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
882 pa_assert(pa_frame_aligned(length, &s->sample_spec));
883 pa_assert(result);
884
885 pa_sink_ref(s);
886
887 pa_assert(!s->thread_info.rewind_requested);
888 pa_assert(s->thread_info.rewind_nbytes == 0);
889
890 if (s->thread_info.state == PA_SINK_SUSPENDED) {
891 result->memblock = pa_memblock_ref(s->silence.memblock);
892 result->index = s->silence.index;
893 result->length = PA_MIN(s->silence.length, length);
894 return;
895 }
896
897 if (length <= 0)
898 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
899
900 block_size_max = pa_mempool_block_size_max(s->core->mempool);
901 if (length > block_size_max)
902 length = pa_frame_align(block_size_max, &s->sample_spec);
903
904 pa_assert(length > 0);
905
906 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
907
908 if (n == 0) {
909
910 *result = s->silence;
911 pa_memblock_ref(result->memblock);
912
913 if (result->length > length)
914 result->length = length;
915
916 } else if (n == 1) {
917 pa_cvolume volume;
918
919 *result = info[0].chunk;
920 pa_memblock_ref(result->memblock);
921
922 if (result->length > length)
923 result->length = length;
924
925 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
926
927 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
928 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
929 pa_memblock_unref(result->memblock);
930 pa_silence_memchunk_get(&s->core->silence_cache,
931 s->core->mempool,
932 result,
933 &s->sample_spec,
934 result->length);
935 } else {
936 pa_memchunk_make_writable(result, 0);
937 pa_volume_memchunk(result, &s->sample_spec, &volume);
938 }
939 }
940 } else {
941 void *ptr;
942 result->memblock = pa_memblock_new(s->core->mempool, length);
943
944 ptr = pa_memblock_acquire(result->memblock);
945 result->length = pa_mix(info, n,
946 ptr, length,
947 &s->sample_spec,
948 &s->thread_info.soft_volume,
949 s->thread_info.soft_muted);
950 pa_memblock_release(result->memblock);
951
952 result->index = 0;
953 }
954
955 inputs_drop(s, info, n, result);
956
957 pa_sink_unref(s);
958 }
959
960 /* Called from IO thread context */
961 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
962 pa_mix_info info[MAX_MIX_CHANNELS];
963 unsigned n;
964 size_t length, block_size_max;
965
966 pa_sink_assert_ref(s);
967 pa_sink_assert_io_context(s);
968 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
969 pa_assert(target);
970 pa_assert(target->memblock);
971 pa_assert(target->length > 0);
972 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
973
974 pa_sink_ref(s);
975
976 pa_assert(!s->thread_info.rewind_requested);
977 pa_assert(s->thread_info.rewind_nbytes == 0);
978
979 if (s->thread_info.state == PA_SINK_SUSPENDED) {
980 pa_silence_memchunk(target, &s->sample_spec);
981 return;
982 }
983
984 length = target->length;
985 block_size_max = pa_mempool_block_size_max(s->core->mempool);
986 if (length > block_size_max)
987 length = pa_frame_align(block_size_max, &s->sample_spec);
988
989 pa_assert(length > 0);
990
991 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
992
993 if (n == 0) {
994 if (target->length > length)
995 target->length = length;
996
997 pa_silence_memchunk(target, &s->sample_spec);
998 } else if (n == 1) {
999 pa_cvolume volume;
1000
1001 if (target->length > length)
1002 target->length = length;
1003
1004 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1005
1006 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1007 pa_silence_memchunk(target, &s->sample_spec);
1008 else {
1009 pa_memchunk vchunk;
1010
1011 vchunk = info[0].chunk;
1012 pa_memblock_ref(vchunk.memblock);
1013
1014 if (vchunk.length > length)
1015 vchunk.length = length;
1016
1017 if (!pa_cvolume_is_norm(&volume)) {
1018 pa_memchunk_make_writable(&vchunk, 0);
1019 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1020 }
1021
1022 pa_memchunk_memcpy(target, &vchunk);
1023 pa_memblock_unref(vchunk.memblock);
1024 }
1025
1026 } else {
1027 void *ptr;
1028
1029 ptr = pa_memblock_acquire(target->memblock);
1030
1031 target->length = pa_mix(info, n,
1032 (uint8_t*) ptr + target->index, length,
1033 &s->sample_spec,
1034 &s->thread_info.soft_volume,
1035 s->thread_info.soft_muted);
1036
1037 pa_memblock_release(target->memblock);
1038 }
1039
1040 inputs_drop(s, info, n, target);
1041
1042 pa_sink_unref(s);
1043 }
1044
1045 /* Called from IO thread context */
1046 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1047 pa_memchunk chunk;
1048 size_t l, d;
1049
1050 pa_sink_assert_ref(s);
1051 pa_sink_assert_io_context(s);
1052 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1053 pa_assert(target);
1054 pa_assert(target->memblock);
1055 pa_assert(target->length > 0);
1056 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1057
1058 pa_sink_ref(s);
1059
1060 pa_assert(!s->thread_info.rewind_requested);
1061 pa_assert(s->thread_info.rewind_nbytes == 0);
1062
1063 l = target->length;
1064 d = 0;
1065 while (l > 0) {
1066 chunk = *target;
1067 chunk.index += d;
1068 chunk.length -= d;
1069
1070 pa_sink_render_into(s, &chunk);
1071
1072 d += chunk.length;
1073 l -= chunk.length;
1074 }
1075
1076 pa_sink_unref(s);
1077 }
1078
/* Called from IO thread context */
/* Like pa_sink_render(), but guarantees exactly 'length' bytes in
 * 'result': a first mixing pass produces up to 'length' bytes (it may
 * yield less — fill_mix_info() can shorten 'length1st' to the inputs'
 * shortest chunk), and any shortfall is then rendered in place with
 * further pa_sink_render_into() passes. */
void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    size_t length1st = length;
    unsigned n;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(length > 0);
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* No contributing inputs: first pass is pure silence. */
        pa_silence_memchunk_get(&s->core->silence_cache,
                                s->core->mempool,
                                result,
                                &s->sample_spec,
                                length1st);
    } else if (n == 1) {
        pa_cvolume volume;

        /* Single input: reuse its chunk, applying the combined soft
         * volume only if it actually changes anything. */
        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
            if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
                pa_memblock_unref(result->memblock);
                pa_silence_memchunk_get(&s->core->silence_cache,
                                        s->core->mempool,
                                        result,
                                        &s->sample_spec,
                                        result->length);
            } else {
                pa_memchunk_make_writable(result, length);
                pa_volume_memchunk(result, &s->sample_spec, &volume);
            }
        }
    } else {
        void *ptr;

        /* Multiple inputs: mix the first 'length1st' bytes into a
         * full-size block (the tail is filled below if needed). */
        result->index = 0;
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);

        result->length = pa_mix(info, n,
                                (uint8_t*) ptr + result->index, length1st,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(result->memblock);
    }

    inputs_drop(s, info, n, result);

    /* First pass fell short: render the remainder slice by slice into
     * the (now writable, full-size) result block. */
    if (result->length < length) {
        pa_memchunk chunk;
        size_t l, d;
        pa_memchunk_make_writable(result, length);

        l = length - result->length;
        d = result->index + result->length;
        while (l > 0) {
            chunk = *result;
            chunk.index = d;
            chunk.length = l;

            pa_sink_render_into(s, &chunk);

            d += chunk.length;
            l -= chunk.length;
        }
        result->length = length;
    }

    pa_sink_unref(s);
}
1172
1173 /* Called from main thread */
1174 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1175 pa_usec_t usec = 0;
1176
1177 pa_sink_assert_ref(s);
1178 pa_assert_ctl_context();
1179 pa_assert(PA_SINK_IS_LINKED(s->state));
1180
1181 /* The returned value is supposed to be in the time domain of the sound card! */
1182
1183 if (s->state == PA_SINK_SUSPENDED)
1184 return 0;
1185
1186 if (!(s->flags & PA_SINK_LATENCY))
1187 return 0;
1188
1189 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1190
1191 return usec;
1192 }
1193
/* Called from IO thread. Like pa_sink_get_latency(), but dispatches
 * the GET_LATENCY message synchronously via process_msg() instead of
 * the asyncmsgq, since we are already in the IO thread. */
pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
    pa_usec_t usec = 0;
    pa_msgobject *o;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SINK_LATENCY))
        return 0;

    o = PA_MSGOBJECT(s);

    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */

    /* NOTE(review): the return type is unsigned, so this -1 comes back
     * as (pa_usec_t) -1 — callers must treat that value as "error",
     * not as a real latency */
    if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
        return -1;

    return usec;
}
1220
/* Called from main context. For a flat-volume sink, recomputes every
 * input's reference_ratio (its volume relative to the sink's
 * reference volume). Only meaningful when PA_SINK_FLAT_VOLUME is
 * set. */
static void compute_reference_ratios(pa_sink *s) {
    uint32_t idx;
    pa_sink_input *i;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        /*
         * Calculates the reference volume from the sink's reference
         * volume. This basically calculates:
         *
         * i->reference_ratio = i->volume / s->reference_volume
         */

        /* Remap the sink volume into the stream's channel map first */
        remapped = s->reference_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->reference_ratio.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            /* We don't update when the sink volume is 0 anyway */
            if (remapped.values[c] <= PA_VOLUME_MUTED)
                continue;

            /* Don't update the reference ratio unless necessary */
            if (pa_sw_volume_multiply(
                        i->reference_ratio.values[c],
                        remapped.values[c]) == i->volume.values[c])
                continue;

            i->reference_ratio.values[c] = pa_sw_volume_divide(
                    i->volume.values[c],
                    remapped.values[c]);
        }
    }
}
1265
/* Called from main context. For a flat-volume sink, recomputes every
 * input's real_ratio (its volume relative to the sink's real volume)
 * and from that the per-stream soft_volume actually applied while
 * mixing. */
static void compute_real_ratios(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        unsigned c;
        pa_cvolume remapped;

        /*
         * This basically calculates:
         *
         * i->real_ratio := i->volume / s->real_volume
         * i->soft_volume := i->real_ratio * i->volume_factor
         */

        /* Remap the sink volume into the stream's channel map first */
        remapped = s->real_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);

        i->real_ratio.channels = i->sample_spec.channels;
        i->soft_volume.channels = i->sample_spec.channels;

        for (c = 0; c < i->sample_spec.channels; c++) {

            if (remapped.values[c] <= PA_VOLUME_MUTED) {
                /* We leave i->real_ratio untouched */
                i->soft_volume.values[c] = PA_VOLUME_MUTED;
                continue;
            }

            /* Don't lose accuracy unless necessary */
            if (pa_sw_volume_multiply(
                        i->real_ratio.values[c],
                        remapped.values[c]) != i->volume.values[c])

                i->real_ratio.values[c] = pa_sw_volume_divide(
                        i->volume.values[c],
                        remapped.values[c]);

            i->soft_volume.values[c] = pa_sw_volume_multiply(
                    i->real_ratio.values[c],
                    i->volume_factor.values[c]);
        }

        /* We don't copy the soft_volume to the thread_info data
         * here. That must be done by the caller */
    }
}
1319
/* Called from main thread. For a flat-volume sink, sets
 * s->real_volume to the per-channel maximum of all connected stream
 * volumes, then updates each stream's real ratio/soft volume to
 * match. */
static void compute_real_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    /* This determines the maximum volume of all streams and sets
     * s->real_volume accordingly. */

    if (pa_idxset_isempty(s->inputs)) {
        /* In the special case that we have no sink input we leave the
         * volume unmodified. */
        s->real_volume = s->reference_volume;
        return;
    }

    pa_cvolume_mute(&s->real_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all inputs
     * connected to this sink */
    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume remapped;

        remapped = i->volume;
        pa_cvolume_remap(&remapped, &i->channel_map, &s->channel_map);
        pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
    }

    /* Then, let's update the real ratios/soft volumes of all inputs
     * connected to this sink */
    compute_real_ratios(s);
}
1356
/* Called from main thread. After the sink's reference volume changed
 * (not caused by a stream volume change), recomputes every stream's
 * volume from its stored reference ratio and notifies listeners of
 * any stream whose volume actually changed. Flat volume mode only. */
static void propagate_reference_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    /* This is called whenever the sink volume changes that is not
     * caused by a sink input volume change. We need to fix up the
     * sink input volumes accordingly */

    PA_IDXSET_FOREACH(i, s->inputs, idx) {
        pa_cvolume old_volume, remapped;

        old_volume = i->volume;

        /* This basically calculates:
         *
         * i->volume := s->reference_volume * i->reference_ratio */

        remapped = s->reference_volume;
        pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
        pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);

        /* The volume changed, let's tell people so */
        if (!pa_cvolume_equal(&old_volume, &i->volume)) {

            if (i->volume_changed)
                i->volume_changed(i);

            pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
        }
    }
}
1394
/* Called from main thread.
 *
 * Sets the sink's reference volume and, in flat volume mode, keeps
 * sink and stream volumes consistent.
 *
 * volume:  new reference volume; may be NULL only in flat volume
 *          mode, meaning "re-derive the sink volume from the stream
 *          volumes"
 * sendmsg: if TRUE, notify the IO thread of the new soft volume
 * save:    if TRUE, mark the resulting volume for persistence */
void pa_sink_set_volume(
        pa_sink *s,
        const pa_cvolume *volume,
        pa_bool_t sendmsg,
        pa_bool_t save) {

    pa_cvolume old_reference_volume;
    pa_bool_t reference_changed;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(!volume || pa_cvolume_compatible(volume, &s->sample_spec));
    pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));

    /* If volume is NULL we synchronize the sink's real and reference
     * volumes with the stream volumes. If it is not NULL we update
     * the reference_volume with it. */

    old_reference_volume = s->reference_volume;

    if (volume) {

        s->reference_volume = *volume;

        if (s->flags & PA_SINK_FLAT_VOLUME) {
            /* OK, propagate this volume change back to the inputs */
            propagate_reference_volume(s);

            /* And now recalculate the real volume */
            compute_real_volume(s);
        } else
            s->real_volume = s->reference_volume;

    } else {
        pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

        /* Ok, let's determine the new real volume */
        compute_real_volume(s);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);

        /* We need to fix the reference ratios of all streams now that
         * we changed the reference volume */
        compute_reference_ratios(s);
    }

    reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
    s->save_volume = (!reference_changed && s->save_volume) || save;

    if (s->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to s->soft_volume */

        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
        s->set_volume(s);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the virtual volume */
        s->soft_volume = s->real_volume;

    /* This tells the sink that soft and/or virtual volume changed */
    if (sendmsg)
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);

    if (reference_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1468
1469 /* Called from main thread. Only to be called by sink implementor */
1470 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1471 pa_sink_assert_ref(s);
1472 pa_assert_ctl_context();
1473
1474 if (!volume)
1475 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1476 else
1477 s->soft_volume = *volume;
1478
1479 if (PA_SINK_IS_LINKED(s->state))
1480 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1481 else
1482 s->thread_info.soft_volume = s->soft_volume;
1483 }
1484
/* Called from main thread. Reacts to an externally-caused change of
 * the hardware (real) volume: adopts it as the new reference volume
 * and, in flat volume mode, rebuilds all stream volumes from their
 * (unchanged) real ratios, notifying listeners as needed. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    pa_sink_input *i;
    uint32_t idx;
    pa_cvolume old_reference_volume;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (pa_cvolume_equal(old_real_volume, &s->real_volume))
        return;

    old_reference_volume = s->reference_volume;

    /* 1. Make the real volume the reference volume */
    s->reference_volume = s->real_volume;

    if (s->flags & PA_SINK_FLAT_VOLUME) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume old_volume, remapped;

            old_volume = i->volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            i->reference_ratio = i->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            remapped = s->reference_volume;
            pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &i->volume)) {

                if (i->volume_changed)
                    i->volume_changed(i);

                pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
            }
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    s->save_volume = TRUE;

    if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1549
1550 /* Called from main thread */
1551 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1552 pa_sink_assert_ref(s);
1553 pa_assert_ctl_context();
1554 pa_assert(PA_SINK_IS_LINKED(s->state));
1555
1556 if (s->refresh_volume || force_refresh) {
1557 struct pa_cvolume old_real_volume;
1558
1559 old_real_volume = s->real_volume;
1560
1561 if (s->get_volume)
1562 s->get_volume(s);
1563
1564 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1565
1566 propagate_real_volume(s, &old_real_volume);
1567 }
1568
1569 return &s->reference_volume;
1570 }
1571
1572 /* Called from main thread */
1573 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1574 pa_cvolume old_real_volume;
1575
1576 pa_sink_assert_ref(s);
1577 pa_assert_ctl_context();
1578 pa_assert(PA_SINK_IS_LINKED(s->state));
1579
1580 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1581
1582 old_real_volume = s->real_volume;
1583 s->real_volume = *new_real_volume;
1584
1585 propagate_real_volume(s, &old_real_volume);
1586 }
1587
1588 /* Called from main thread */
1589 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1590 pa_bool_t old_muted;
1591
1592 pa_sink_assert_ref(s);
1593 pa_assert_ctl_context();
1594 pa_assert(PA_SINK_IS_LINKED(s->state));
1595
1596 old_muted = s->muted;
1597 s->muted = mute;
1598 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1599
1600 if (s->set_mute)
1601 s->set_mute(s);
1602
1603 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1604
1605 if (old_muted != s->muted)
1606 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1607 }
1608
/* Called from main thread. Returns the sink's mute state, re-querying
 * the driver first when refreshing is configured or forced. If the
 * hardware state diverged from ours, subscribers are notified and the
 * IO thread's soft mute is re-synchronized. */
pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_muted || force_refresh) {
        pa_bool_t old_muted = s->muted;

        /* Let the implementor update s->muted from the hardware */
        if (s->get_mute)
            s->get_mute(s);

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);

        if (old_muted != s->muted) {
            /* A hardware change not triggered by us is almost
             * certainly user-intended, so remember it */
            s->save_muted = TRUE;

            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

            /* Make sure the soft mute status stays in sync */
            pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
        }
    }

    return s->muted;
}
1636
1637 /* Called from main thread */
1638 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1639 pa_sink_assert_ref(s);
1640 pa_assert_ctl_context();
1641 pa_assert(PA_SINK_IS_LINKED(s->state));
1642
1643 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1644
1645 if (s->muted == new_muted)
1646 return;
1647
1648 s->muted = new_muted;
1649 s->save_muted = TRUE;
1650
1651 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1652 }
1653
1654 /* Called from main thread */
1655 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1656 pa_sink_assert_ref(s);
1657 pa_assert_ctl_context();
1658
1659 if (p)
1660 pa_proplist_update(s->proplist, mode, p);
1661
1662 if (PA_SINK_IS_LINKED(s->state)) {
1663 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1664 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1665 }
1666
1667 return TRUE;
1668 }
1669
1670 /* Called from main thread */
1671 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1672 void pa_sink_set_description(pa_sink *s, const char *description) {
1673 const char *old;
1674 pa_sink_assert_ref(s);
1675 pa_assert_ctl_context();
1676
1677 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1678 return;
1679
1680 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1681
1682 if (old && description && pa_streq(old, description))
1683 return;
1684
1685 if (description)
1686 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1687 else
1688 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1689
1690 if (s->monitor_source) {
1691 char *n;
1692
1693 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1694 pa_source_set_description(s->monitor_source, n);
1695 pa_xfree(n);
1696 }
1697
1698 if (PA_SINK_IS_LINKED(s->state)) {
1699 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1700 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1701 }
1702 }
1703
1704 /* Called from main thread */
1705 unsigned pa_sink_linked_by(pa_sink *s) {
1706 unsigned ret;
1707
1708 pa_sink_assert_ref(s);
1709 pa_assert_ctl_context();
1710 pa_assert(PA_SINK_IS_LINKED(s->state));
1711
1712 ret = pa_idxset_size(s->inputs);
1713
1714 /* We add in the number of streams connected to us here. Please
1715 * note the asymmmetry to pa_sink_used_by()! */
1716
1717 if (s->monitor_source)
1718 ret += pa_source_linked_by(s->monitor_source);
1719
1720 return ret;
1721 }
1722
1723 /* Called from main thread */
1724 unsigned pa_sink_used_by(pa_sink *s) {
1725 unsigned ret;
1726
1727 pa_sink_assert_ref(s);
1728 pa_assert_ctl_context();
1729 pa_assert(PA_SINK_IS_LINKED(s->state));
1730
1731 ret = pa_idxset_size(s->inputs);
1732 pa_assert(ret >= s->n_corked);
1733
1734 /* Streams connected to our monitor source do not matter for
1735 * pa_sink_used_by()!.*/
1736
1737 return ret - s->n_corked;
1738 }
1739
1740 /* Called from main thread */
1741 unsigned pa_sink_check_suspend(pa_sink *s) {
1742 unsigned ret;
1743 pa_sink_input *i;
1744 uint32_t idx;
1745
1746 pa_sink_assert_ref(s);
1747 pa_assert_ctl_context();
1748
1749 if (!PA_SINK_IS_LINKED(s->state))
1750 return 0;
1751
1752 ret = 0;
1753
1754 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1755 pa_sink_input_state_t st;
1756
1757 st = pa_sink_input_get_state(i);
1758 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1759
1760 if (st == PA_SINK_INPUT_CORKED)
1761 continue;
1762
1763 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1764 continue;
1765
1766 ret ++;
1767 }
1768
1769 if (s->monitor_source)
1770 ret += pa_source_check_suspend(s->monitor_source);
1771
1772 return ret;
1773 }
1774
1775 /* Called from the IO thread */
1776 static void sync_input_volumes_within_thread(pa_sink *s) {
1777 pa_sink_input *i;
1778 void *state = NULL;
1779
1780 pa_sink_assert_ref(s);
1781 pa_sink_assert_io_context(s);
1782
1783 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1784 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1785 continue;
1786
1787 i->thread_info.soft_volume = i->soft_volume;
1788 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1789 }
1790 }
1791
/* Called from IO thread, except when it is not. Generic message
 * dispatcher for pa_sink; implementors chain up to this from their
 * own process_msg(). Returns 0 when the message was handled, -1 for
 * unknown message codes. */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Link this input into the per-thread sync chain, if any */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be
             * fixed up and then configured on the sink */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming. */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_PREPARE_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unlink this input from the per-thread sync chain */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                /* Rewind the stream so no already-rendered data is
                 * lost on the old sink; it will be replayed on the
                 * new one */
                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                /* Skip the data that is already queued in the new
                 * sink's buffer so playback lines up after the move */
                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            /* In non-flat mode the per-stream volumes need no syncing */
            if (!(s->flags & PA_SINK_FLAT_VOLUME))
                return 0;

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:
            /* Nothing to do; the implementor overrides this if needed */
            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:
            /* Nothing to do; the implementor overrides this if needed */
            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* Whether this transition enters or leaves suspension */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            /* A suspended sink cannot rewind */
            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            /* r[0] = min latency, r[1] = max latency */
            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            /* To be handled by the implementor; unknown here */
            ;
    }

    return -1;
}
2118
2119 /* Called from main thread */
2120 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2121 pa_sink *sink;
2122 uint32_t idx;
2123 int ret = 0;
2124
2125 pa_core_assert_ref(c);
2126 pa_assert_ctl_context();
2127 pa_assert(cause != 0);
2128
2129 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2130 int r;
2131
2132 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2133 ret = r;
2134 }
2135
2136 return ret;
2137 }
2138
/* Called from main thread. Synchronously asks the IO thread (via
 * PA_SINK_MESSAGE_DETACH) to detach all streams from this sink. */
void pa_sink_detach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
2147
/* Called from main thread. Synchronously asks the IO thread (via
 * PA_SINK_MESSAGE_ATTACH) to reattach all streams to this sink. */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2156
2157 /* Called from IO thread */
2158 void pa_sink_detach_within_thread(pa_sink *s) {
2159 pa_sink_input *i;
2160 void *state = NULL;
2161
2162 pa_sink_assert_ref(s);
2163 pa_sink_assert_io_context(s);
2164 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2165
2166 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2167 if (i->detach)
2168 i->detach(i);
2169
2170 if (s->monitor_source)
2171 pa_source_detach_within_thread(s->monitor_source);
2172 }
2173
2174 /* Called from IO thread */
2175 void pa_sink_attach_within_thread(pa_sink *s) {
2176 pa_sink_input *i;
2177 void *state = NULL;
2178
2179 pa_sink_assert_ref(s);
2180 pa_sink_assert_io_context(s);
2181 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2182
2183 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2184 if (i->attach)
2185 i->attach(i);
2186
2187 if (s->monitor_source)
2188 pa_source_attach_within_thread(s->monitor_source);
2189 }
2190
2191 /* Called from IO thread */
2192 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2193 pa_sink_assert_ref(s);
2194 pa_sink_assert_io_context(s);
2195 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2196
2197 if (s->thread_info.state == PA_SINK_SUSPENDED)
2198 return;
2199
2200 if (nbytes == (size_t) -1)
2201 nbytes = s->thread_info.max_rewind;
2202
2203 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2204
2205 if (s->thread_info.rewind_requested &&
2206 nbytes <= s->thread_info.rewind_nbytes)
2207 return;
2208
2209 s->thread_info.rewind_nbytes = nbytes;
2210 s->thread_info.rewind_requested = TRUE;
2211
2212 if (s->request_rewind)
2213 s->request_rewind(s);
2214 }
2215
2216 /* Called from IO thread */
2217 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2218 pa_usec_t result = (pa_usec_t) -1;
2219 pa_sink_input *i;
2220 void *state = NULL;
2221 pa_usec_t monitor_latency;
2222
2223 pa_sink_assert_ref(s);
2224 pa_sink_assert_io_context(s);
2225
2226 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2227 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2228
2229 if (s->thread_info.requested_latency_valid)
2230 return s->thread_info.requested_latency;
2231
2232 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2233 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2234 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2235 result = i->thread_info.requested_sink_latency;
2236
2237 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2238
2239 if (monitor_latency != (pa_usec_t) -1 &&
2240 (result == (pa_usec_t) -1 || result > monitor_latency))
2241 result = monitor_latency;
2242
2243 if (result != (pa_usec_t) -1)
2244 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2245
2246 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2247 /* Only cache if properly initialized */
2248 s->thread_info.requested_latency = result;
2249 s->thread_info.requested_latency_valid = TRUE;
2250 }
2251
2252 return result;
2253 }
2254
/* Called from main thread */
pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
    pa_usec_t usec = 0;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* A suspended sink has no requested latency */
    if (s->state == PA_SINK_SUSPENDED)
        return 0;

    /* Query the IO thread synchronously for the current value */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
    return usec;
}
2269
2270 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2271 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2272 pa_sink_input *i;
2273 void *state = NULL;
2274
2275 pa_sink_assert_ref(s);
2276 pa_sink_assert_io_context(s);
2277
2278 if (max_rewind == s->thread_info.max_rewind)
2279 return;
2280
2281 s->thread_info.max_rewind = max_rewind;
2282
2283 if (PA_SINK_IS_LINKED(s->thread_info.state))
2284 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2285 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2286
2287 if (s->monitor_source)
2288 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2289 }
2290
/* Called from main thread */
void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Once linked, thread_info belongs to the IO thread, so route the
     * update through the message queue; before that we may call the
     * within-thread variant directly. */
    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
    else
        pa_sink_set_max_rewind_within_thread(s, max_rewind);
}
2301
2302 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2303 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2304 void *state = NULL;
2305
2306 pa_sink_assert_ref(s);
2307 pa_sink_assert_io_context(s);
2308
2309 if (max_request == s->thread_info.max_request)
2310 return;
2311
2312 s->thread_info.max_request = max_request;
2313
2314 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2315 pa_sink_input *i;
2316
2317 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2318 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2319 }
2320 }
2321
/* Called from main thread */
void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Once linked, thread_info belongs to the IO thread, so route the
     * update through the message queue; before that we may call the
     * within-thread variant directly. */
    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
    else
        pa_sink_set_max_request_within_thread(s, max_request);
}
2332
/* Called from IO thread */
/* Drops the cached requested latency so that the next call to
 * pa_sink_get_requested_latency_within_thread() recomputes it, and
 * notifies the implementor plus all inputs. If 'dynamic' is TRUE the
 * invalidation only matters for dynamic-latency sinks and is skipped
 * entirely on fixed-latency ones. */
void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Only dynamic-latency sinks cache a requested latency that can go
     * stale */
    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = FALSE;
    else if (dynamic)
        /* Fixed-latency sink and a dynamic-only invalidation: nothing
         * to do, not even the notifications below */
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        /* Let the implementor react first, then every input */
        if (s->update_requested_latency)
            s->update_requested_latency(s);

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
2356
2357 /* Called from main thread */
2358 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2359 pa_sink_assert_ref(s);
2360 pa_assert_ctl_context();
2361
2362 /* min_latency == 0: no limit
2363 * min_latency anything else: specified limit
2364 *
2365 * Similar for max_latency */
2366
2367 if (min_latency < ABSOLUTE_MIN_LATENCY)
2368 min_latency = ABSOLUTE_MIN_LATENCY;
2369
2370 if (max_latency <= 0 ||
2371 max_latency > ABSOLUTE_MAX_LATENCY)
2372 max_latency = ABSOLUTE_MAX_LATENCY;
2373
2374 pa_assert(min_latency <= max_latency);
2375
2376 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2377 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2378 max_latency == ABSOLUTE_MAX_LATENCY) ||
2379 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2380
2381 if (PA_SINK_IS_LINKED(s->state)) {
2382 pa_usec_t r[2];
2383
2384 r[0] = min_latency;
2385 r[1] = max_latency;
2386
2387 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2388 } else
2389 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2390 }
2391
2392 /* Called from main thread */
2393 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2394 pa_sink_assert_ref(s);
2395 pa_assert_ctl_context();
2396 pa_assert(min_latency);
2397 pa_assert(max_latency);
2398
2399 if (PA_SINK_IS_LINKED(s->state)) {
2400 pa_usec_t r[2] = { 0, 0 };
2401
2402 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2403
2404 *min_latency = r[0];
2405 *max_latency = r[1];
2406 } else {
2407 *min_latency = s->thread_info.min_latency;
2408 *max_latency = s->thread_info.max_latency;
2409 }
2410 }
2411
/* Called from IO thread */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Callers (pa_sink_set_latency_range()) have already clamped */
    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    /* Unchanged range? Then skip the notifications below */
    if (s->thread_info.min_latency == min_latency &&
        s->thread_info.max_latency == max_latency)
        return;

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    /* Tell every input so it can re-evaluate its requested latency */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    /* The cached requested latency may now lie outside the new range */
    pa_sink_invalidate_requested_latency(s, FALSE);

    /* NOTE(review): monitor_source is dereferenced without a NULL check
     * here, unlike e.g. pa_sink_set_max_rewind_within_thread() -- verify
     * every sink reaching this point has a monitor source. */
    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
2446
/* Called from main thread */
void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    /* Dynamic-latency sinks have no fixed latency; callers must then
     * pass 0 */
    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    /* Clamp into the globally allowed range */
    if (latency < ABSOLUTE_MIN_LATENCY)
        latency = ABSOLUTE_MIN_LATENCY;

    if (latency > ABSOLUTE_MAX_LATENCY)
        latency = ABSOLUTE_MAX_LATENCY;

    /* Once linked, thread_info belongs to the IO thread, so route the
     * update through the message queue; before that set it directly */
    if (PA_SINK_IS_LINKED(s->state))
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
    else
        s->thread_info.fixed_latency = latency;

    /* Keep the monitor source in sync. NOTE(review): no NULL check on
     * monitor_source here, unlike some other functions in this file --
     * verify every sink reaching this point has one. */
    pa_source_set_fixed_latency(s->monitor_source, latency);
}
2470
2471 /* Called from main thread */
2472 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2473 pa_usec_t latency;
2474
2475 pa_sink_assert_ref(s);
2476 pa_assert_ctl_context();
2477
2478 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2479 return 0;
2480
2481 if (PA_SINK_IS_LINKED(s->state))
2482 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2483 else
2484 latency = s->thread_info.fixed_latency;
2485
2486 return latency;
2487 }
2488
/* Called from IO thread */
void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Dynamic-latency sinks have no fixed latency; callers must then
     * pass 0 */
    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    /* Callers (pa_sink_set_fixed_latency()) have already clamped */
    pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(latency <= ABSOLUTE_MAX_LATENCY);

    /* Unchanged? Then skip the notifications below */
    if (s->thread_info.fixed_latency == latency)
        return;

    s->thread_info.fixed_latency = latency;

    /* Tell every input so it can re-evaluate its requested latency */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_fixed_latency)
                i->update_sink_fixed_latency(i);
    }

    /* The cached requested latency is derived from the fixed latency */
    pa_sink_invalidate_requested_latency(s, FALSE);

    /* NOTE(review): monitor_source is dereferenced without a NULL check
     * here, unlike some other functions in this file -- verify every
     * sink reaching this point has one. */
    pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
}
2520
2521 /* Called from main context */
2522 size_t pa_sink_get_max_rewind(pa_sink *s) {
2523 size_t r;
2524 pa_sink_assert_ref(s);
2525 pa_assert_ctl_context();
2526
2527 if (!PA_SINK_IS_LINKED(s->state))
2528 return s->thread_info.max_rewind;
2529
2530 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2531
2532 return r;
2533 }
2534
2535 /* Called from main context */
2536 size_t pa_sink_get_max_request(pa_sink *s) {
2537 size_t r;
2538 pa_sink_assert_ref(s);
2539 pa_assert_ctl_context();
2540
2541 if (!PA_SINK_IS_LINKED(s->state))
2542 return s->thread_info.max_request;
2543
2544 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2545
2546 return r;
2547 }
2548
2549 /* Called from main context */
2550 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2551 pa_device_port *port;
2552
2553 pa_sink_assert_ref(s);
2554 pa_assert_ctl_context();
2555
2556 if (!s->set_port) {
2557 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2558 return -PA_ERR_NOTIMPLEMENTED;
2559 }
2560
2561 if (!s->ports)
2562 return -PA_ERR_NOENTITY;
2563
2564 if (!(port = pa_hashmap_get(s->ports, name)))
2565 return -PA_ERR_NOENTITY;
2566
2567 if (s->active_port == port) {
2568 s->save_port = s->save_port || save;
2569 return 0;
2570 }
2571
2572 if ((s->set_port(s, port)) < 0)
2573 return -PA_ERR_NOENTITY;
2574
2575 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2576
2577 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2578
2579 s->active_port = port;
2580 s->save_port = save;
2581
2582 return 0;
2583 }
2584
2585 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2586 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2587
2588 pa_assert(p);
2589
2590 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2591 return TRUE;
2592
2593 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2594
2595 if (pa_streq(ff, "microphone"))
2596 t = "audio-input-microphone";
2597 else if (pa_streq(ff, "webcam"))
2598 t = "camera-web";
2599 else if (pa_streq(ff, "computer"))
2600 t = "computer";
2601 else if (pa_streq(ff, "handset"))
2602 t = "phone";
2603 else if (pa_streq(ff, "portable"))
2604 t = "multimedia-player";
2605 else if (pa_streq(ff, "tv"))
2606 t = "video-display";
2607
2608 /*
2609 * The following icons are not part of the icon naming spec,
2610 * because Rodney Dawes sucks as the maintainer of that spec.
2611 *
2612 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2613 */
2614 else if (pa_streq(ff, "headset"))
2615 t = "audio-headset";
2616 else if (pa_streq(ff, "headphone"))
2617 t = "audio-headphones";
2618 else if (pa_streq(ff, "speaker"))
2619 t = "audio-speakers";
2620 else if (pa_streq(ff, "hands-free"))
2621 t = "audio-handsfree";
2622 }
2623
2624 if (!t)
2625 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2626 if (pa_streq(c, "modem"))
2627 t = "modem";
2628
2629 if (!t) {
2630 if (is_sink)
2631 t = "audio-card";
2632 else
2633 t = "audio-input-microphone";
2634 }
2635
2636 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2637 if (strstr(profile, "analog"))
2638 s = "-analog";
2639 else if (strstr(profile, "iec958"))
2640 s = "-iec958";
2641 else if (strstr(profile, "hdmi"))
2642 s = "-hdmi";
2643 }
2644
2645 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2646
2647 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2648
2649 return TRUE;
2650 }
2651
2652 pa_bool_t pa_device_init_description(pa_proplist *p) {
2653 const char *s, *d = NULL, *k;
2654 pa_assert(p);
2655
2656 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2657 return TRUE;
2658
2659 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2660 if (pa_streq(s, "internal"))
2661 d = _("Internal Audio");
2662
2663 if (!d)
2664 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2665 if (pa_streq(s, "modem"))
2666 d = _("Modem");
2667
2668 if (!d)
2669 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2670
2671 if (!d)
2672 return FALSE;
2673
2674 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2675
2676 if (d && k)
2677 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2678 else if (d)
2679 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2680
2681 return TRUE;
2682 }
2683
2684 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2685 const char *s;
2686 pa_assert(p);
2687
2688 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2689 return TRUE;
2690
2691 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2692 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2693 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2694 return TRUE;
2695 }
2696
2697 return FALSE;
2698 }