]> code.delx.au - pulseaudio/blob - src/pulsecore/sink.c
Merge branch 'master' of git://0pointer.de/pulseaudio into dbus-work
[pulseaudio] / src / pulsecore / sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
54
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 pa_zero(*data);
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
149
150 /* Called from main context */
151 static void reset_callbacks(pa_sink *s) {
152 pa_assert(s);
153
154 s->set_state = NULL;
155 s->get_volume = NULL;
156 s->set_volume = NULL;
157 s->get_mute = NULL;
158 s->set_mute = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
161 s->set_port = NULL;
162 }
163
/* Called from main context */
/* Create a new sink from the parameters collected in @data: register
 * the name, fire the SINK_NEW and SINK_FIXATE hooks, validate and fix
 * up sample spec/channel map/volume/mute, fill in all runtime fields,
 * insert the sink into the core's registry, and create the companion
 * "<name>.monitor" source. Returns the new sink (still in INIT state;
 * the caller must pa_sink_put() it), or NULL on failure. */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* Reserve the sink name; may fail e.g. if the name is taken and
     * data->namereg_fail is set */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    /* Give modules a chance to adjust or veto the new sink */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    /* NOTE(review): the pa_return_null_if_fail() paths below leak s
     * and the registered name -- see the FIXME above */
    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Fall back to a default channel map matching the channel count */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    /* Default volume: all channels at PA_VOLUME_NORM */
    if (!data->volume_is_set)
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit properties from the owning card, if any */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to modify the data before it is fixated */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;

    s->reference_volume = s->real_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    /* Use the explicitly requested port if it exists... */
    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* ...otherwise pick the port with the highest priority */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* Fields below belong to the IO thread once the sink is running */
    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    /* FIXME: This should probably be moved to pa_sink_put() */
    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Create the monitor source that taps what this sink plays back */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    /* Keep the monitor's latency configuration in sync with ours */
    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
357
/* Called from main context */
/* Change the sink's state, notifying the implementor (via set_state)
 * and the IO thread (via SET_STATE message). If the IO thread rejects
 * the change, the implementor callback is rolled back to the original
 * state. On success, state-change hooks/subscription events are fired
 * and, on suspend/resume transitions, all inputs and the monitor
 * source are informed. Returns 0 on success, negative on error. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* A "suspend change" is any transition between SUSPENDED and an
     * opened (IDLE/RUNNING) state, in either direction */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    /* Synchronously tell the IO thread; roll back the implementor
     * callback if the thread refuses */
    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
415
/* Called from main context */
/* Finish initialization of a sink created with pa_sink_new() and make
 * it live: fix up volume-related flags, sync volume/mute into the
 * thread_info copies, move the sink to IDLE, put the monitor source,
 * and announce the new sink via subscription event and hook. The
 * asyncmsgq and latency range must be set up before calling this. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow volume related flags to be set
     * between _new() and _put(). */

    /* Pure software sinks do their attenuation in the dB domain */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
        s->flags |= PA_SINK_DECIBEL_VOLUME;

    if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
        s->flags |= PA_SINK_FLAT_VOLUME;

    /* We assume that if the sink implementor changed the default
     * volume he did so in real_volume, because that is the usual
     * place where he is supposed to place his changes. */
    s->reference_volume = s->real_volume;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;

    /* Sanity-check the flag/field combinations the implementor set up */
    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    /* The monitor source must mirror our latency configuration */
    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
462
/* Called from main context */
/* Disconnect the sink from the core: unregister the name, remove it
 * from the core/card registries, kill all inputs, enter UNLINKED
 * state, clear the callbacks and unlink the monitor source. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Kill every input; the j != i check guards against an input that
     * survives pa_sink_input_kill() and would loop forever */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
512
/* Called from main context */
/* Destructor invoked when the last reference to the sink is dropped:
 * unlinks the sink if still linked, then releases the monitor source,
 * input containers, silence memblock, strings, proplist and ports. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);
    pa_sink_input *i;

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    pa_idxset_free(s->inputs, NULL, NULL);

    /* Drop the per-IO-thread references to inputs still in the map */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
559
560 /* Called from main context, and not while the IO thread is active, please */
561 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
562 pa_sink_assert_ref(s);
563 pa_assert_ctl_context();
564
565 s->asyncmsgq = q;
566
567 if (s->monitor_source)
568 pa_source_set_asyncmsgq(s->monitor_source, q);
569 }
570
571 /* Called from main context, and not while the IO thread is active, please */
572 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
573 pa_sink_assert_ref(s);
574 pa_assert_ctl_context();
575
576 if (mask == 0)
577 return;
578
579 /* For now, allow only a minimal set of flags to be changed. */
580 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
581
582 s->flags = (s->flags & ~mask) | (value & mask);
583
584 pa_source_update_flags(s->monitor_source,
585 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
586 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
587 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
588 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
589 }
590
591 /* Called from IO context, or before _put() from main context */
592 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
593 pa_sink_assert_ref(s);
594 pa_sink_assert_io_context(s);
595
596 s->thread_info.rtpoll = p;
597
598 if (s->monitor_source)
599 pa_source_set_rtpoll(s->monitor_source, p);
600 }
601
602 /* Called from main context */
603 int pa_sink_update_status(pa_sink*s) {
604 pa_sink_assert_ref(s);
605 pa_assert_ctl_context();
606 pa_assert(PA_SINK_IS_LINKED(s->state));
607
608 if (s->state == PA_SINK_SUSPENDED)
609 return 0;
610
611 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
612 }
613
/* Called from main context */
/* Add (@suspend TRUE) or remove (@suspend FALSE) @cause from the
 * sink's suspend-cause bitmask — mirrored on the monitor source — and
 * switch to SUSPENDED when any cause remains, or back to RUNNING/IDLE
 * when none does. Returns what sink_set_state() returns, or 0 when no
 * state change is needed. */
int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (suspend) {
        s->suspend_cause |= cause;
        s->monitor_source->suspend_cause |= cause;
    } else {
        s->suspend_cause &= ~cause;
        s->monitor_source->suspend_cause &= ~cause;
    }

    /* Already in the state the cause mask demands? Then nothing to do */
    if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return sink_set_state(s, PA_SINK_SUSPENDED);
    else
        return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
639
/* Called from main context */
/* Begin moving every input away from this sink: each input for which
 * pa_sink_input_start_move() succeeds is pushed (with a reference
 * held) onto @q, to be completed later with pa_sink_move_all_finish()
 * or aborted with pa_sink_move_all_fail(). If @q is NULL a new queue
 * is allocated. Returns the queue. */
pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
    pa_sink_input *i, *n;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (!q)
        q = pa_queue_new();

    /* Fetch the next input before starting the move, since a
     * successful start_move detaches i from s->inputs */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
        n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));

        pa_sink_input_ref(i);

        if (pa_sink_input_start_move(i) >= 0)
            pa_queue_push(q, i);
        else
            pa_sink_input_unref(i);
    }

    return q;
}
665
666 /* Called from main context */
667 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
668 pa_sink_input *i;
669
670 pa_sink_assert_ref(s);
671 pa_assert_ctl_context();
672 pa_assert(PA_SINK_IS_LINKED(s->state));
673 pa_assert(q);
674
675 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
676 if (pa_sink_input_finish_move(i, s, save) < 0)
677 pa_sink_input_fail_move(i);
678
679 pa_sink_input_unref(i);
680 }
681
682 pa_queue_free(q, NULL, NULL);
683 }
684
685 /* Called from main context */
686 void pa_sink_move_all_fail(pa_queue *q) {
687 pa_sink_input *i;
688
689 pa_assert_ctl_context();
690 pa_assert(q);
691
692 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
693 pa_sink_input_fail_move(i);
694 pa_sink_input_unref(i);
695 }
696
697 pa_queue_free(q, NULL, NULL);
698 }
699
/* Called from IO thread context */
/* Execute a previously requested rewind of @nbytes bytes: clear the
 * pending request, forward the rewind to every connected input, and
 * propagate it to the monitor source. A zero @nbytes with no pending
 * request is a no-op; while suspended nothing is processed. */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* If nobody requested this and this is actually no real rewind
     * then we can short cut this. Please note that this means that
     * not all rewind requests triggered upstream will always be
     * translated in actual requests! */
    if (!s->thread_info.rewind_requested && nbytes <= 0)
        return;

    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return;

    if (nbytes > 0)
        pa_log_debug("Processing rewind...");

    /* Even a 0-byte rewind is forwarded: inputs use it to reset their
     * internal rewind bookkeeping */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_process_rewind(i, nbytes);
    }

    if (nbytes > 0)
        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
            pa_source_process_rewind(s->monitor_source, nbytes);
}
734
/* Called from IO thread context */
/* Peek a chunk from every connected input and collect the results in
 * the @info array (at most @maxinfo entries). On return *length is
 * reduced to the shortest chunk seen (if any input returned data) so
 * the caller mixes only what every input can provide. Chunks that are
 * pure silence are dropped from the array — their inputs will still
 * be advanced by inputs_drop() later. Each collected entry holds a
 * reference to its input in info->userdata. Returns the number of
 * entries filled in. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        /* Track the minimum chunk length across all inputs */
        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Silence doesn't need to be mixed in; drop the chunk but keep
         * iterating (the input is advanced later in inputs_drop()) */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
774
/* Called from IO thread context */
/* After a mix: advance every input by result->length bytes, feed each
 * input's direct outputs (volume-adjusted per-input audio, or silence
 * for inputs that contributed nothing), release the references and
 * memblocks held in the @info array (@n entries, filled by
 * fill_mix_info()), and post the mixed @result to the monitor
 * source. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state = NULL;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array.
         * p persists across iterations, so if input order is unchanged
         * each lookup succeeds on the first probe. */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                if (m && m->chunk.memblock) {
                    /* This input contributed audio: apply its own volume
                     * and hand that (not the full mix) to its direct outputs */
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    /* No contribution (silence or not collected): direct
                     * outputs get silence of matching length */
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release this entry's chunk and input reference */
        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
871
872 /* Called from IO thread context */
873 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
874 pa_mix_info info[MAX_MIX_CHANNELS];
875 unsigned n;
876 size_t block_size_max;
877
878 pa_sink_assert_ref(s);
879 pa_sink_assert_io_context(s);
880 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
881 pa_assert(pa_frame_aligned(length, &s->sample_spec));
882 pa_assert(result);
883
884 pa_sink_ref(s);
885
886 pa_assert(!s->thread_info.rewind_requested);
887 pa_assert(s->thread_info.rewind_nbytes == 0);
888
889 if (s->thread_info.state == PA_SINK_SUSPENDED) {
890 result->memblock = pa_memblock_ref(s->silence.memblock);
891 result->index = s->silence.index;
892 result->length = PA_MIN(s->silence.length, length);
893 return;
894 }
895
896 if (length <= 0)
897 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
898
899 block_size_max = pa_mempool_block_size_max(s->core->mempool);
900 if (length > block_size_max)
901 length = pa_frame_align(block_size_max, &s->sample_spec);
902
903 pa_assert(length > 0);
904
905 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
906
907 if (n == 0) {
908
909 *result = s->silence;
910 pa_memblock_ref(result->memblock);
911
912 if (result->length > length)
913 result->length = length;
914
915 } else if (n == 1) {
916 pa_cvolume volume;
917
918 *result = info[0].chunk;
919 pa_memblock_ref(result->memblock);
920
921 if (result->length > length)
922 result->length = length;
923
924 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
925
926 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
927 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
928 pa_memblock_unref(result->memblock);
929 pa_silence_memchunk_get(&s->core->silence_cache,
930 s->core->mempool,
931 result,
932 &s->sample_spec,
933 result->length);
934 } else {
935 pa_memchunk_make_writable(result, 0);
936 pa_volume_memchunk(result, &s->sample_spec, &volume);
937 }
938 }
939 } else {
940 void *ptr;
941 result->memblock = pa_memblock_new(s->core->mempool, length);
942
943 ptr = pa_memblock_acquire(result->memblock);
944 result->length = pa_mix(info, n,
945 ptr, length,
946 &s->sample_spec,
947 &s->thread_info.soft_volume,
948 s->thread_info.soft_muted);
949 pa_memblock_release(result->memblock);
950
951 result->index = 0;
952 }
953
954 inputs_drop(s, info, n, result);
955
956 pa_sink_unref(s);
957 }
958
959 /* Called from IO thread context */
960 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
961 pa_mix_info info[MAX_MIX_CHANNELS];
962 unsigned n;
963 size_t length, block_size_max;
964
965 pa_sink_assert_ref(s);
966 pa_sink_assert_io_context(s);
967 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
968 pa_assert(target);
969 pa_assert(target->memblock);
970 pa_assert(target->length > 0);
971 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
972
973 pa_sink_ref(s);
974
975 pa_assert(!s->thread_info.rewind_requested);
976 pa_assert(s->thread_info.rewind_nbytes == 0);
977
978 if (s->thread_info.state == PA_SINK_SUSPENDED) {
979 pa_silence_memchunk(target, &s->sample_spec);
980 return;
981 }
982
983 length = target->length;
984 block_size_max = pa_mempool_block_size_max(s->core->mempool);
985 if (length > block_size_max)
986 length = pa_frame_align(block_size_max, &s->sample_spec);
987
988 pa_assert(length > 0);
989
990 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
991
992 if (n == 0) {
993 if (target->length > length)
994 target->length = length;
995
996 pa_silence_memchunk(target, &s->sample_spec);
997 } else if (n == 1) {
998 pa_cvolume volume;
999
1000 if (target->length > length)
1001 target->length = length;
1002
1003 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1004
1005 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1006 pa_silence_memchunk(target, &s->sample_spec);
1007 else {
1008 pa_memchunk vchunk;
1009
1010 vchunk = info[0].chunk;
1011 pa_memblock_ref(vchunk.memblock);
1012
1013 if (vchunk.length > length)
1014 vchunk.length = length;
1015
1016 if (!pa_cvolume_is_norm(&volume)) {
1017 pa_memchunk_make_writable(&vchunk, 0);
1018 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1019 }
1020
1021 pa_memchunk_memcpy(target, &vchunk);
1022 pa_memblock_unref(vchunk.memblock);
1023 }
1024
1025 } else {
1026 void *ptr;
1027
1028 ptr = pa_memblock_acquire(target->memblock);
1029
1030 target->length = pa_mix(info, n,
1031 (uint8_t*) ptr + target->index, length,
1032 &s->sample_spec,
1033 &s->thread_info.soft_volume,
1034 s->thread_info.soft_muted);
1035
1036 pa_memblock_release(target->memblock);
1037 }
1038
1039 inputs_drop(s, info, n, target);
1040
1041 pa_sink_unref(s);
1042 }
1043
/* Called from IO thread context */
/* Like pa_sink_render_into(), but guarantees that the whole @target
 * is filled: pa_sink_render_into() is called repeatedly on the yet
 * unfilled tail until target->length bytes have been rendered. */
void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
    pa_memchunk chunk;
    size_t l, d;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    /* l counts bytes still missing, d is the offset of the unfilled
     * tail within the target */
    l = target->length;
    d = 0;
    while (l > 0) {
        chunk = *target;
        chunk.index += d;
        chunk.length -= d;

        pa_sink_render_into(s, &chunk);

        d += chunk.length;
        l -= chunk.length;
    }

    pa_sink_unref(s);
}
1077
/* Called from IO thread context */
/* Like pa_sink_render(), but guarantees exactly @length bytes in
 * *result: a first pass renders what the inputs can provide in one go
 * (length1st, possibly shortened by fill_mix_info()), then the
 * remaining tail is filled via repeated pa_sink_render_into() on a
 * writable copy. */
void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    size_t length1st = length;
    unsigned n;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(length > 0);
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    pa_assert(length > 0);

    /* length1st may be reduced to the shortest available input chunk */
    n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        /* Nothing available: start from silence of the first-pass size */
        pa_silence_memchunk_get(&s->core->silence_cache,
                                s->core->mempool,
                                result,
                                &s->sample_spec,
                                length1st);
    } else if (n == 1) {
        pa_cvolume volume;

        /* Single contributor: pass its chunk through with volume/mute */
        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        /* NOTE(review): clamping against the full @length rather than
         * length1st looks suspicious -- confirm against upstream */
        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
            if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
                pa_memblock_unref(result->memblock);
                pa_silence_memchunk_get(&s->core->silence_cache,
                                        s->core->mempool,
                                        result,
                                        &s->sample_spec,
                                        result->length);
            } else {
                pa_memchunk_make_writable(result, length);
                pa_volume_memchunk(result, &s->sample_spec, &volume);
            }
        }
    } else {
        void *ptr;

        /* Multiple contributors: mix the first pass into a fresh block
         * already sized for the full @length */
        result->index = 0;
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);

        result->length = pa_mix(info, n,
                                (uint8_t*) ptr + result->index, length1st,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(result->memblock);
    }

    inputs_drop(s, info, n, result);

    /* Second pass: render the rest of the requested bytes into the
     * tail until exactly @length bytes are available */
    if (result->length < length) {
        pa_memchunk chunk;
        size_t l, d;
        pa_memchunk_make_writable(result, length);

        l = length - result->length;
        d = result->index + result->length;
        while (l > 0) {
            chunk = *result;
            chunk.index = d;
            chunk.length = l;

            pa_sink_render_into(s, &chunk);

            d += chunk.length;
            l -= chunk.length;
        }
        result->length = length;
    }

    pa_sink_unref(s);
}
1171
1172 /* Called from main thread */
1173 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1174 pa_usec_t usec = 0;
1175
1176 pa_sink_assert_ref(s);
1177 pa_assert_ctl_context();
1178 pa_assert(PA_SINK_IS_LINKED(s->state));
1179
1180 /* The returned value is supposed to be in the time domain of the sound card! */
1181
1182 if (s->state == PA_SINK_SUSPENDED)
1183 return 0;
1184
1185 if (!(s->flags & PA_SINK_LATENCY))
1186 return 0;
1187
1188 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1189
1190 return usec;
1191 }
1192
/* Called from IO thread.
 *
 * Same as pa_sink_get_latency() but usable from within the IO thread:
 * instead of posting a message it invokes the message handler
 * directly. */
pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
    pa_usec_t usec = 0;
    pa_msgobject *o;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SINK_LATENCY))
        return 0;

    o = PA_MSGOBJECT(s);

    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */

    /* NOTE(review): pa_usec_t is unsigned, so this -1 reaches callers
     * as (pa_usec_t) -1 rather than a negative value — confirm callers
     * treat that as the error/invalid sentinel. */
    if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
        return -1;

    return usec;
}
1219
1220 /* Called from main context */
1221 static void compute_reference_ratios(pa_sink *s) {
1222 uint32_t idx;
1223 pa_sink_input *i;
1224
1225 pa_sink_assert_ref(s);
1226 pa_assert_ctl_context();
1227 pa_assert(PA_SINK_IS_LINKED(s->state));
1228 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1229
1230 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1231 unsigned c;
1232 pa_cvolume remapped;
1233
1234 /*
1235 * Calculates the reference volume from the sink's reference
1236 * volume. This basically calculates:
1237 *
1238 * i->reference_ratio = i->volume / s->reference_volume
1239 */
1240
1241 remapped = s->reference_volume;
1242 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1243
1244 i->reference_ratio.channels = i->sample_spec.channels;
1245
1246 for (c = 0; c < i->sample_spec.channels; c++) {
1247
1248 /* We don't update when the sink volume is 0 anyway */
1249 if (remapped.values[c] <= PA_VOLUME_MUTED)
1250 continue;
1251
1252 /* Don't update the reference ratio unless necessary */
1253 if (pa_sw_volume_multiply(
1254 i->reference_ratio.values[c],
1255 remapped.values[c]) == i->volume.values[c])
1256 continue;
1257
1258 i->reference_ratio.values[c] = pa_sw_volume_divide(
1259 i->volume.values[c],
1260 remapped.values[c]);
1261 }
1262 }
1263 }
1264
1265 /* Called from main context */
1266 static void compute_real_ratios(pa_sink *s) {
1267 pa_sink_input *i;
1268 uint32_t idx;
1269
1270 pa_sink_assert_ref(s);
1271 pa_assert_ctl_context();
1272 pa_assert(PA_SINK_IS_LINKED(s->state));
1273 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1274
1275 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1276 unsigned c;
1277 pa_cvolume remapped;
1278
1279 /*
1280 * This basically calculates:
1281 *
1282 * i->real_ratio := i->volume / s->real_volume
1283 * i->soft_volume := i->real_ratio * i->volume_factor
1284 */
1285
1286 remapped = s->real_volume;
1287 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1288
1289 i->real_ratio.channels = i->sample_spec.channels;
1290 i->soft_volume.channels = i->sample_spec.channels;
1291
1292 for (c = 0; c < i->sample_spec.channels; c++) {
1293
1294 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1295 /* We leave i->real_ratio untouched */
1296 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1297 continue;
1298 }
1299
1300 /* Don't lose accuracy unless necessary */
1301 if (pa_sw_volume_multiply(
1302 i->real_ratio.values[c],
1303 remapped.values[c]) != i->volume.values[c])
1304
1305 i->real_ratio.values[c] = pa_sw_volume_divide(
1306 i->volume.values[c],
1307 remapped.values[c]);
1308
1309 i->soft_volume.values[c] = pa_sw_volume_multiply(
1310 i->real_ratio.values[c],
1311 i->volume_factor.values[c]);
1312 }
1313
1314 /* We don't copy the soft_volume to the thread_info data
1315 * here. That must be done by the caller */
1316 }
1317 }
1318
1319 /* Called from main thread */
1320 static void compute_real_volume(pa_sink *s) {
1321 pa_sink_input *i;
1322 uint32_t idx;
1323
1324 pa_sink_assert_ref(s);
1325 pa_assert_ctl_context();
1326 pa_assert(PA_SINK_IS_LINKED(s->state));
1327 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1328
1329 /* This determines the maximum volume of all streams and sets
1330 * s->real_volume accordingly. */
1331
1332 if (pa_idxset_isempty(s->inputs)) {
1333 /* In the special case that we have no sink input we leave the
1334 * volume unmodified. */
1335 s->real_volume = s->reference_volume;
1336 return;
1337 }
1338
1339 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1340
1341 /* First let's determine the new maximum volume of all inputs
1342 * connected to this sink */
1343 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1344 pa_cvolume remapped;
1345
1346 remapped = i->volume;
1347 pa_cvolume_remap(&remapped, &i->channel_map, &s->channel_map);
1348 pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
1349 }
1350
1351 /* Then, let's update the real ratios/soft volumes of all inputs
1352 * connected to this sink */
1353 compute_real_ratios(s);
1354 }
1355
1356 /* Called from main thread */
1357 static void propagate_reference_volume(pa_sink *s) {
1358 pa_sink_input *i;
1359 uint32_t idx;
1360
1361 pa_sink_assert_ref(s);
1362 pa_assert_ctl_context();
1363 pa_assert(PA_SINK_IS_LINKED(s->state));
1364 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1365
1366 /* This is called whenever the sink volume changes that is not
1367 * caused by a sink input volume change. We need to fix up the
1368 * sink input volumes accordingly */
1369
1370 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1371 pa_cvolume old_volume, remapped;
1372
1373 old_volume = i->volume;
1374
1375 /* This basically calculates:
1376 *
1377 * i->volume := s->reference_volume * i->reference_ratio */
1378
1379 remapped = s->reference_volume;
1380 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1381 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1382
1383 /* The reference volume changed, let's tell people so */
1384 if (!pa_cvolume_equal(&old_volume, &i->volume))
1385 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1386 }
1387 }
1388
/* Called from main thread.
 *
 * Sets (or, when 'volume' is NULL, resynchronizes) the sink volume.
 *
 * volume  - the new reference volume; NULL means "derive the sink
 *           volume from the stream volumes" (flat-volume sinks only)
 * sendmsg - whether to notify the IO thread of the new soft volume
 * save    - whether the new volume should be remembered persistently
 */
void pa_sink_set_volume(
        pa_sink *s,
        const pa_cvolume *volume,
        pa_bool_t sendmsg,
        pa_bool_t save) {

    pa_cvolume old_reference_volume;
    pa_bool_t reference_changed;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(!volume || pa_cvolume_valid(volume));
    pa_assert(!volume || pa_cvolume_compatible(volume, &s->sample_spec));
    pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));

    /* If volume is NULL we synchronize the sink's real and reference
     * volumes with the stream volumes. If it is not NULL we update
     * the reference_volume with it. */

    old_reference_volume = s->reference_volume;

    if (volume) {

        s->reference_volume = *volume;

        if (s->flags & PA_SINK_FLAT_VOLUME) {
            /* OK, propagate this volume change back to the inputs */
            propagate_reference_volume(s);

            /* And now recalculate the real volume */
            compute_real_volume(s);
        } else
            /* Without flat volume the real volume simply tracks the
             * reference volume. */
            s->real_volume = s->reference_volume;

    } else {
        pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

        /* Ok, let's determine the new real volume */
        compute_real_volume(s);

        /* Let's 'push' the reference volume if necessary */
        pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);

        /* We need to fix the reference ratios of all streams now that
         * we changed the reference volume */
        compute_reference_ratios(s);
    }

    /* Keep an existing save flag if the reference volume is unchanged;
     * otherwise honour the caller's 'save' request. */
    reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
    s->save_volume = (!reference_changed && s->save_volume) || save;

    if (s->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to s->soft_volume */

        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
        s->set_volume(s);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the virtual volume */
        s->soft_volume = s->real_volume;

    /* This tells the sink that soft and/or virtual volume changed */
    if (sendmsg)
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);

    if (reference_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1462
1463 /* Called from main thread. Only to be called by sink implementor */
1464 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1465 pa_sink_assert_ref(s);
1466 pa_assert_ctl_context();
1467
1468 if (!volume)
1469 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1470 else
1471 s->soft_volume = *volume;
1472
1473 if (PA_SINK_IS_LINKED(s->state))
1474 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1475 else
1476 s->thread_info.soft_volume = s->soft_volume;
1477 }
1478
/* Called from main thread.
 *
 * old_real_volume - the real volume before the external change; used
 *                   to detect whether anything actually changed. */
static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
    pa_sink_input *i;
    uint32_t idx;
    pa_cvolume old_reference_volume;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* This is called when the hardware's real volume changes due to
     * some external event. We copy the real volume into our
     * reference volume and then rebuild the stream volumes based on
     * i->real_ratio which should stay fixed. */

    if (pa_cvolume_equal(old_real_volume, &s->real_volume))
        return;

    old_reference_volume = s->reference_volume;

    /* 1. Make the real volume the reference volume */
    s->reference_volume = s->real_volume;

    if (s->flags & PA_SINK_FLAT_VOLUME) {

        PA_IDXSET_FOREACH(i, s->inputs, idx) {
            pa_cvolume old_volume, remapped;

            old_volume = i->volume;

            /* 2. Since the sink's reference and real volumes are equal
             * now our ratios should be too. */
            i->reference_ratio = i->real_ratio;

            /* 3. Recalculate the new stream reference volume based on the
             * reference ratio and the sink's reference volume.
             *
             * This basically calculates:
             *
             * i->volume = s->reference_volume * i->reference_ratio
             *
             * This is identical to propagate_reference_volume() */
            remapped = s->reference_volume;
            pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
            pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);

            /* Notify if something changed */
            if (!pa_cvolume_equal(&old_volume, &i->volume))
                pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
        }
    }

    /* Something got changed in the hardware. It probably makes sense
     * to save changed hw settings given that hw volume changes not
     * triggered by PA are almost certainly done by the user. */
    s->save_volume = TRUE;

    if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1538
1539 /* Called from main thread */
1540 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1541 pa_sink_assert_ref(s);
1542 pa_assert_ctl_context();
1543 pa_assert(PA_SINK_IS_LINKED(s->state));
1544
1545 if (s->refresh_volume || force_refresh) {
1546 struct pa_cvolume old_real_volume;
1547
1548 old_real_volume = s->real_volume;
1549
1550 if (s->get_volume)
1551 s->get_volume(s);
1552
1553 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1554
1555 propagate_real_volume(s, &old_real_volume);
1556 }
1557
1558 return &s->reference_volume;
1559 }
1560
1561 /* Called from main thread */
1562 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1563 pa_cvolume old_real_volume;
1564
1565 pa_sink_assert_ref(s);
1566 pa_assert_ctl_context();
1567 pa_assert(PA_SINK_IS_LINKED(s->state));
1568
1569 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1570
1571 old_real_volume = s->real_volume;
1572 s->real_volume = *new_real_volume;
1573
1574 propagate_real_volume(s, &old_real_volume);
1575 }
1576
1577 /* Called from main thread */
1578 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1579 pa_bool_t old_muted;
1580
1581 pa_sink_assert_ref(s);
1582 pa_assert_ctl_context();
1583 pa_assert(PA_SINK_IS_LINKED(s->state));
1584
1585 old_muted = s->muted;
1586 s->muted = mute;
1587 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1588
1589 if (s->set_mute)
1590 s->set_mute(s);
1591
1592 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1593
1594 if (old_muted != s->muted)
1595 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1596 }
1597
1598 /* Called from main thread */
1599 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1600
1601 pa_sink_assert_ref(s);
1602 pa_assert_ctl_context();
1603 pa_assert(PA_SINK_IS_LINKED(s->state));
1604
1605 if (s->refresh_muted || force_refresh) {
1606 pa_bool_t old_muted = s->muted;
1607
1608 if (s->get_mute)
1609 s->get_mute(s);
1610
1611 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1612
1613 if (old_muted != s->muted) {
1614 s->save_muted = TRUE;
1615
1616 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1617
1618 /* Make sure the soft mute status stays in sync */
1619 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1620 }
1621 }
1622
1623 return s->muted;
1624 }
1625
1626 /* Called from main thread */
1627 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1628 pa_sink_assert_ref(s);
1629 pa_assert_ctl_context();
1630 pa_assert(PA_SINK_IS_LINKED(s->state));
1631
1632 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1633
1634 if (s->muted == new_muted)
1635 return;
1636
1637 s->muted = new_muted;
1638 s->save_muted = TRUE;
1639
1640 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1641 }
1642
1643 /* Called from main thread */
1644 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1645 pa_sink_assert_ref(s);
1646 pa_assert_ctl_context();
1647
1648 if (p)
1649 pa_proplist_update(s->proplist, mode, p);
1650
1651 if (PA_SINK_IS_LINKED(s->state)) {
1652 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1653 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1654 }
1655
1656 return TRUE;
1657 }
1658
1659 /* Called from main thread */
1660 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1661 void pa_sink_set_description(pa_sink *s, const char *description) {
1662 const char *old;
1663 pa_sink_assert_ref(s);
1664 pa_assert_ctl_context();
1665
1666 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1667 return;
1668
1669 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1670
1671 if (old && description && pa_streq(old, description))
1672 return;
1673
1674 if (description)
1675 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1676 else
1677 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1678
1679 if (s->monitor_source) {
1680 char *n;
1681
1682 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1683 pa_source_set_description(s->monitor_source, n);
1684 pa_xfree(n);
1685 }
1686
1687 if (PA_SINK_IS_LINKED(s->state)) {
1688 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1689 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1690 }
1691 }
1692
1693 /* Called from main thread */
1694 unsigned pa_sink_linked_by(pa_sink *s) {
1695 unsigned ret;
1696
1697 pa_sink_assert_ref(s);
1698 pa_assert_ctl_context();
1699 pa_assert(PA_SINK_IS_LINKED(s->state));
1700
1701 ret = pa_idxset_size(s->inputs);
1702
1703 /* We add in the number of streams connected to us here. Please
1704 * note the asymmmetry to pa_sink_used_by()! */
1705
1706 if (s->monitor_source)
1707 ret += pa_source_linked_by(s->monitor_source);
1708
1709 return ret;
1710 }
1711
1712 /* Called from main thread */
1713 unsigned pa_sink_used_by(pa_sink *s) {
1714 unsigned ret;
1715
1716 pa_sink_assert_ref(s);
1717 pa_assert_ctl_context();
1718 pa_assert(PA_SINK_IS_LINKED(s->state));
1719
1720 ret = pa_idxset_size(s->inputs);
1721 pa_assert(ret >= s->n_corked);
1722
1723 /* Streams connected to our monitor source do not matter for
1724 * pa_sink_used_by()!.*/
1725
1726 return ret - s->n_corked;
1727 }
1728
1729 /* Called from main thread */
1730 unsigned pa_sink_check_suspend(pa_sink *s) {
1731 unsigned ret;
1732 pa_sink_input *i;
1733 uint32_t idx;
1734
1735 pa_sink_assert_ref(s);
1736 pa_assert_ctl_context();
1737
1738 if (!PA_SINK_IS_LINKED(s->state))
1739 return 0;
1740
1741 ret = 0;
1742
1743 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1744 pa_sink_input_state_t st;
1745
1746 st = pa_sink_input_get_state(i);
1747 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1748
1749 if (st == PA_SINK_INPUT_CORKED)
1750 continue;
1751
1752 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1753 continue;
1754
1755 ret ++;
1756 }
1757
1758 if (s->monitor_source)
1759 ret += pa_source_check_suspend(s->monitor_source);
1760
1761 return ret;
1762 }
1763
1764 /* Called from the IO thread */
1765 static void sync_input_volumes_within_thread(pa_sink *s) {
1766 pa_sink_input *i;
1767 void *state = NULL;
1768
1769 pa_sink_assert_ref(s);
1770 pa_sink_assert_io_context(s);
1771
1772 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1773 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1774 continue;
1775
1776 i->thread_info.soft_volume = i->soft_volume;
1777 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1778 }
1779 }
1780
/* Called from IO thread, except when it is not.
 *
 * Default message handler for sinks: implements stream attach/detach,
 * stream moves, volume/mute synchronization, state changes, and the
 * various latency/rewind parameter queries. Returns 0 on success, -1
 * for unhandled message codes. */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Mirror the sync_prev/sync_next links into thread_info. */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be
             * fixed up and then configured on the sink */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming. */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unlink this input from its thread-side sync chain. */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                /* Rewind the stream so no already-rendered data is
                 * lost when it continues on the new sink. */
                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                /* Drop data already queued on the new sink so the
                 * stream stays in sync with its playback position. */
                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_VOLUME:

            /* Pick up the new soft volume, rewinding so it takes
             * effect on already-rendered data. */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (!(s->flags & PA_SINK_FLAT_VOLUME))
                return 0;

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:
            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            /* Pick up the new mute state, rewinding so it takes
             * effect on already-rendered data. */
            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:
            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* Did this transition cross the suspended boundary in
             * either direction? */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            /* A suspended sink has no pending rewind. */
            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                /* Let the inputs know about the suspend transition. */
                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            /* userdata points at a pa_usec_t[2]: {min, max}. */
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            /* userdata points at a pa_usec_t[2] to be filled in. */
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        /* GET_LATENCY must be implemented by the sink itself. */
        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            ;
    }

    return -1;
}
2107
2108 /* Called from main thread */
2109 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2110 pa_sink *sink;
2111 uint32_t idx;
2112 int ret = 0;
2113
2114 pa_core_assert_ref(c);
2115 pa_assert_ctl_context();
2116 pa_assert(cause != 0);
2117
2118 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2119 int r;
2120
2121 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2122 ret = r;
2123 }
2124
2125 return ret;
2126 }
2127
2128 /* Called from main thread */
2129 void pa_sink_detach(pa_sink *s) {
2130 pa_sink_assert_ref(s);
2131 pa_assert_ctl_context();
2132 pa_assert(PA_SINK_IS_LINKED(s->state));
2133
2134 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2135 }
2136
/* Called from main thread */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* Synchronously hand the work off to the IO thread (presumably the
     * message handler dispatches to pa_sink_attach_within_thread() --
     * the ATTACH case is outside this view). */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2145
2146 /* Called from IO thread */
2147 void pa_sink_detach_within_thread(pa_sink *s) {
2148 pa_sink_input *i;
2149 void *state = NULL;
2150
2151 pa_sink_assert_ref(s);
2152 pa_sink_assert_io_context(s);
2153 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2154
2155 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2156 if (i->detach)
2157 i->detach(i);
2158
2159 if (s->monitor_source)
2160 pa_source_detach_within_thread(s->monitor_source);
2161 }
2162
2163 /* Called from IO thread */
2164 void pa_sink_attach_within_thread(pa_sink *s) {
2165 pa_sink_input *i;
2166 void *state = NULL;
2167
2168 pa_sink_assert_ref(s);
2169 pa_sink_assert_io_context(s);
2170 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2171
2172 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2173 if (i->attach)
2174 i->attach(i);
2175
2176 if (s->monitor_source)
2177 pa_source_attach_within_thread(s->monitor_source);
2178 }
2179
2180 /* Called from IO thread */
2181 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2182 pa_sink_assert_ref(s);
2183 pa_sink_assert_io_context(s);
2184 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2185
2186 if (s->thread_info.state == PA_SINK_SUSPENDED)
2187 return;
2188
2189 if (nbytes == (size_t) -1)
2190 nbytes = s->thread_info.max_rewind;
2191
2192 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2193
2194 if (s->thread_info.rewind_requested &&
2195 nbytes <= s->thread_info.rewind_nbytes)
2196 return;
2197
2198 s->thread_info.rewind_nbytes = nbytes;
2199 s->thread_info.rewind_requested = TRUE;
2200
2201 if (s->request_rewind)
2202 s->request_rewind(s);
2203 }
2204
/* Called from IO thread */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Sinks without dynamic latency always report their fixed latency,
     * clamped into the configured min/max limits. */
    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    /* Use the cached value unless it was invalidated */
    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* The overall requested latency is the smallest latency any
     * connected stream asked for; (pa_usec_t) -1 means "no request". */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* The monitor source's request counts, too.
     * NOTE(review): assumes s->monitor_source is never NULL here, unlike
     * the guarded calls elsewhere in this file -- confirm. */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    /* Clamp an actual request into the configured limits; keep -1 as-is */
    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
2243
2244 /* Called from main thread */
2245 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2246 pa_usec_t usec = 0;
2247
2248 pa_sink_assert_ref(s);
2249 pa_assert_ctl_context();
2250 pa_assert(PA_SINK_IS_LINKED(s->state));
2251
2252 if (s->state == PA_SINK_SUSPENDED)
2253 return 0;
2254
2255 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2256 return usec;
2257 }
2258
2259 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2260 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2261 pa_sink_input *i;
2262 void *state = NULL;
2263
2264 pa_sink_assert_ref(s);
2265 pa_sink_assert_io_context(s);
2266
2267 if (max_rewind == s->thread_info.max_rewind)
2268 return;
2269
2270 s->thread_info.max_rewind = max_rewind;
2271
2272 if (PA_SINK_IS_LINKED(s->thread_info.state))
2273 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2274 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2275
2276 if (s->monitor_source)
2277 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2278 }
2279
2280 /* Called from main thread */
2281 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2282 pa_sink_assert_ref(s);
2283 pa_assert_ctl_context();
2284
2285 if (PA_SINK_IS_LINKED(s->state))
2286 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2287 else
2288 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2289 }
2290
2291 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2292 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2293 void *state = NULL;
2294
2295 pa_sink_assert_ref(s);
2296 pa_sink_assert_io_context(s);
2297
2298 if (max_request == s->thread_info.max_request)
2299 return;
2300
2301 s->thread_info.max_request = max_request;
2302
2303 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2304 pa_sink_input *i;
2305
2306 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2307 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2308 }
2309 }
2310
2311 /* Called from main thread */
2312 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2313 pa_sink_assert_ref(s);
2314 pa_assert_ctl_context();
2315
2316 if (PA_SINK_IS_LINKED(s->state))
2317 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2318 else
2319 pa_sink_set_max_request_within_thread(s, max_request);
2320 }
2321
/* Called from IO thread */
void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* The cached requested latency only exists for dynamic-latency
     * sinks. For fixed-latency sinks there is no cache to drop, and if
     * the caller is only interested in dynamic-latency changes
     * (dynamic == TRUE) we need not notify anybody either. */
    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = FALSE;
    else if (dynamic)
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        /* Let the implementation and every connected stream know that
         * the requested latency may have changed. */
        if (s->update_requested_latency)
            s->update_requested_latency(s);

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
2345
2346 /* Called from main thread */
2347 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2348 pa_sink_assert_ref(s);
2349 pa_assert_ctl_context();
2350
2351 /* min_latency == 0: no limit
2352 * min_latency anything else: specified limit
2353 *
2354 * Similar for max_latency */
2355
2356 if (min_latency < ABSOLUTE_MIN_LATENCY)
2357 min_latency = ABSOLUTE_MIN_LATENCY;
2358
2359 if (max_latency <= 0 ||
2360 max_latency > ABSOLUTE_MAX_LATENCY)
2361 max_latency = ABSOLUTE_MAX_LATENCY;
2362
2363 pa_assert(min_latency <= max_latency);
2364
2365 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2366 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2367 max_latency == ABSOLUTE_MAX_LATENCY) ||
2368 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2369
2370 if (PA_SINK_IS_LINKED(s->state)) {
2371 pa_usec_t r[2];
2372
2373 r[0] = min_latency;
2374 r[1] = max_latency;
2375
2376 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2377 } else
2378 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2379 }
2380
2381 /* Called from main thread */
2382 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2383 pa_sink_assert_ref(s);
2384 pa_assert_ctl_context();
2385 pa_assert(min_latency);
2386 pa_assert(max_latency);
2387
2388 if (PA_SINK_IS_LINKED(s->state)) {
2389 pa_usec_t r[2] = { 0, 0 };
2390
2391 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2392
2393 *min_latency = r[0];
2394 *max_latency = r[1];
2395 } else {
2396 *min_latency = s->thread_info.min_latency;
2397 *max_latency = s->thread_info.max_latency;
2398 }
2399 }
2400
2401 /* Called from IO thread */
2402 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2403 pa_sink_assert_ref(s);
2404 pa_sink_assert_io_context(s);
2405
2406 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2407 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2408 pa_assert(min_latency <= max_latency);
2409
2410 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2411 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2412 max_latency == ABSOLUTE_MAX_LATENCY) ||
2413 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2414
2415 if (s->thread_info.min_latency == min_latency &&
2416 s->thread_info.max_latency == max_latency)
2417 return;
2418
2419 s->thread_info.min_latency = min_latency;
2420 s->thread_info.max_latency = max_latency;
2421
2422 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2423 pa_sink_input *i;
2424 void *state = NULL;
2425
2426 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2427 if (i->update_sink_latency_range)
2428 i->update_sink_latency_range(i);
2429 }
2430
2431 pa_sink_invalidate_requested_latency(s, FALSE);
2432
2433 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2434 }
2435
2436 /* Called from main thread */
2437 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2438 pa_sink_assert_ref(s);
2439 pa_assert_ctl_context();
2440
2441 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2442 pa_assert(latency == 0);
2443 return;
2444 }
2445
2446 if (latency < ABSOLUTE_MIN_LATENCY)
2447 latency = ABSOLUTE_MIN_LATENCY;
2448
2449 if (latency > ABSOLUTE_MAX_LATENCY)
2450 latency = ABSOLUTE_MAX_LATENCY;
2451
2452 if (PA_SINK_IS_LINKED(s->state))
2453 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2454 else
2455 s->thread_info.fixed_latency = latency;
2456
2457 pa_source_set_fixed_latency(s->monitor_source, latency);
2458 }
2459
2460 /* Called from main thread */
2461 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2462 pa_usec_t latency;
2463
2464 pa_sink_assert_ref(s);
2465 pa_assert_ctl_context();
2466
2467 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2468 return 0;
2469
2470 if (PA_SINK_IS_LINKED(s->state))
2471 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2472 else
2473 latency = s->thread_info.fixed_latency;
2474
2475 return latency;
2476 }
2477
2478 /* Called from IO thread */
2479 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
2480 pa_sink_assert_ref(s);
2481 pa_sink_assert_io_context(s);
2482
2483 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2484 pa_assert(latency == 0);
2485 return;
2486 }
2487
2488 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2489 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2490
2491 if (s->thread_info.fixed_latency == latency)
2492 return;
2493
2494 s->thread_info.fixed_latency = latency;
2495
2496 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2497 pa_sink_input *i;
2498 void *state = NULL;
2499
2500 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2501 if (i->update_sink_fixed_latency)
2502 i->update_sink_fixed_latency(i);
2503 }
2504
2505 pa_sink_invalidate_requested_latency(s, FALSE);
2506
2507 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
2508 }
2509
2510 /* Called from main context */
2511 size_t pa_sink_get_max_rewind(pa_sink *s) {
2512 size_t r;
2513 pa_sink_assert_ref(s);
2514 pa_assert_ctl_context();
2515
2516 if (!PA_SINK_IS_LINKED(s->state))
2517 return s->thread_info.max_rewind;
2518
2519 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2520
2521 return r;
2522 }
2523
2524 /* Called from main context */
2525 size_t pa_sink_get_max_request(pa_sink *s) {
2526 size_t r;
2527 pa_sink_assert_ref(s);
2528 pa_assert_ctl_context();
2529
2530 if (!PA_SINK_IS_LINKED(s->state))
2531 return s->thread_info.max_request;
2532
2533 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2534
2535 return r;
2536 }
2537
2538 /* Called from main context */
2539 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2540 pa_device_port *port;
2541
2542 pa_sink_assert_ref(s);
2543 pa_assert_ctl_context();
2544
2545 if (!s->set_port) {
2546 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2547 return -PA_ERR_NOTIMPLEMENTED;
2548 }
2549
2550 if (!s->ports)
2551 return -PA_ERR_NOENTITY;
2552
2553 if (!(port = pa_hashmap_get(s->ports, name)))
2554 return -PA_ERR_NOENTITY;
2555
2556 if (s->active_port == port) {
2557 s->save_port = s->save_port || save;
2558 return 0;
2559 }
2560
2561 if ((s->set_port(s, port)) < 0)
2562 return -PA_ERR_NOENTITY;
2563
2564 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2565
2566 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2567
2568 s->active_port = port;
2569 s->save_port = save;
2570
2571 return 0;
2572 }
2573
2574 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2575 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2576
2577 pa_assert(p);
2578
2579 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2580 return TRUE;
2581
2582 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2583
2584 if (pa_streq(ff, "microphone"))
2585 t = "audio-input-microphone";
2586 else if (pa_streq(ff, "webcam"))
2587 t = "camera-web";
2588 else if (pa_streq(ff, "computer"))
2589 t = "computer";
2590 else if (pa_streq(ff, "handset"))
2591 t = "phone";
2592 else if (pa_streq(ff, "portable"))
2593 t = "multimedia-player";
2594 else if (pa_streq(ff, "tv"))
2595 t = "video-display";
2596
2597 /*
2598 * The following icons are not part of the icon naming spec,
2599 * because Rodney Dawes sucks as the maintainer of that spec.
2600 *
2601 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2602 */
2603 else if (pa_streq(ff, "headset"))
2604 t = "audio-headset";
2605 else if (pa_streq(ff, "headphone"))
2606 t = "audio-headphones";
2607 else if (pa_streq(ff, "speaker"))
2608 t = "audio-speakers";
2609 else if (pa_streq(ff, "hands-free"))
2610 t = "audio-handsfree";
2611 }
2612
2613 if (!t)
2614 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2615 if (pa_streq(c, "modem"))
2616 t = "modem";
2617
2618 if (!t) {
2619 if (is_sink)
2620 t = "audio-card";
2621 else
2622 t = "audio-input-microphone";
2623 }
2624
2625 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2626 if (strstr(profile, "analog"))
2627 s = "-analog";
2628 else if (strstr(profile, "iec958"))
2629 s = "-iec958";
2630 else if (strstr(profile, "hdmi"))
2631 s = "-hdmi";
2632 }
2633
2634 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2635
2636 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2637
2638 return TRUE;
2639 }
2640
2641 pa_bool_t pa_device_init_description(pa_proplist *p) {
2642 const char *s, *d = NULL, *k;
2643 pa_assert(p);
2644
2645 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2646 return TRUE;
2647
2648 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2649 if (pa_streq(s, "internal"))
2650 d = _("Internal Audio");
2651
2652 if (!d)
2653 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2654 if (pa_streq(s, "modem"))
2655 d = _("Modem");
2656
2657 if (!d)
2658 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2659
2660 if (!d)
2661 return FALSE;
2662
2663 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2664
2665 if (d && k)
2666 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2667 else if (d)
2668 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2669
2670 return TRUE;
2671 }
2672
2673 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2674 const char *s;
2675 pa_assert(p);
2676
2677 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2678 return TRUE;
2679
2680 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2681 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2682 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2683 return TRUE;
2684 }
2685
2686 return FALSE;
2687 }