1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
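/* The latency limits and the default fixed latency below are expressed in
 * microseconds; DEFAULT_FIXED_LATENCY is only used for sinks that do not
 * support dynamic latency adjustment. */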
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
54
55 PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 pa_zero(*data);
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
149
150 /* Called from main context */
151 static void reset_callbacks(pa_sink *s) {
152 pa_assert(s);
153
154 s->set_state = NULL;
155 s->get_volume = NULL;
156 s->set_volume = NULL;
157 s->get_mute = NULL;
158 s->set_mute = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
161 s->set_port = NULL;
162 }
163
164 /* Called from main context */
165 pa_sink* pa_sink_new(
166 pa_core *core,
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
169
170 pa_sink *s;
171 const char *name;
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
174 const char *dn;
175 char *pt;
176
177 pa_assert(core);
178 pa_assert(data);
179 pa_assert(data->name);
180 pa_assert_ctl_context();
181
182 s = pa_msgobject_new(pa_sink);
183
184 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
185 pa_log_debug("Failed to register name %s.", data->name);
186 pa_xfree(s);
187 return NULL;
188 }
189
190 pa_sink_new_data_set_name(data, name);
191
192 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
193 pa_xfree(s);
194 pa_namereg_unregister(core, name);
195 return NULL;
196 }
197
198 /* FIXME, need to free s here on failure */
199
200 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
201 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
202
203 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
204
205 if (!data->channel_map_is_set)
206 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
207
208 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
209 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
210
211 if (!data->volume_is_set)
212 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
213
214 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
215 pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
216
217 if (!data->muted_is_set)
218 data->muted = FALSE;
219
220 if (data->card)
221 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
222
223 pa_device_init_description(data->proplist);
224 pa_device_init_icon(data->proplist, TRUE);
225 pa_device_init_intended_roles(data->proplist);
226
227 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
228 pa_xfree(s);
229 pa_namereg_unregister(core, name);
230 return NULL;
231 }
232
233 s->parent.parent.free = sink_free;
234 s->parent.process_msg = pa_sink_process_msg;
235
236 s->core = core;
237 s->state = PA_SINK_INIT;
238 s->flags = flags;
239 s->suspend_cause = 0;
240 s->name = pa_xstrdup(name);
241 s->proplist = pa_proplist_copy(data->proplist);
242 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
243 s->module = data->module;
244 s->card = data->card;
245
246 s->sample_spec = data->sample_spec;
247 s->channel_map = data->channel_map;
248
249 s->inputs = pa_idxset_new(NULL, NULL);
250 s->n_corked = 0;
251
252 s->reference_volume = s->real_volume = data->volume;
253 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
254 s->base_volume = PA_VOLUME_NORM;
255 s->n_volume_steps = PA_VOLUME_NORM+1;
256 s->muted = data->muted;
257 s->refresh_volume = s->refresh_muted = FALSE;
258
259 reset_callbacks(s);
260 s->userdata = NULL;
261
262 s->asyncmsgq = NULL;
263
264 /* As a minor optimization we just steal the list instead of
265 * copying it here */
266 s->ports = data->ports;
267 data->ports = NULL;
268
269 s->active_port = NULL;
270 s->save_port = FALSE;
271
272 if (data->active_port && s->ports)
273 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
274 s->save_port = data->save_port;
275
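/* If no (valid) active port was requested, fall back to the port with the highest priority */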
276 if (!s->active_port && s->ports) {
277 void *state;
278 pa_device_port *p;
279
280 PA_HASHMAP_FOREACH(p, s->ports, state)
281 if (!s->active_port || p->priority > s->active_port->priority)
282 s->active_port = p;
283 }
284
285 s->save_volume = data->save_volume;
286 s->save_muted = data->save_muted;
287
288 pa_silence_memchunk_get(
289 &core->silence_cache,
290 core->mempool,
291 &s->silence,
292 &s->sample_spec,
293 0);
294
295 s->thread_info.rtpoll = NULL;
296 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
297 s->thread_info.soft_volume = s->soft_volume;
298 s->thread_info.soft_muted = s->muted;
299 s->thread_info.state = s->state;
300 s->thread_info.rewind_nbytes = 0;
301 s->thread_info.rewind_requested = FALSE;
302 s->thread_info.max_rewind = 0;
303 s->thread_info.max_request = 0;
304 s->thread_info.requested_latency_valid = FALSE;
305 s->thread_info.requested_latency = 0;
306 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
307 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
308 s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
309
310 /* FIXME: This should probably be moved to pa_sink_put() */
311 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
312
313 if (s->card)
314 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
315
316 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
317 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
318 s->index,
319 s->name,
320 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
321 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
322 pt);
323 pa_xfree(pt);
324
325 pa_source_new_data_init(&source_data);
326 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
327 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
328 source_data.name = pa_sprintf_malloc("%s.monitor", name);
329 source_data.driver = data->driver;
330 source_data.module = data->module;
331 source_data.card = data->card;
332
333 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
334 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
335 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
336
337 s->monitor_source = pa_source_new(core, &source_data,
338 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
339 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
340
341 pa_source_new_data_done(&source_data);
342
343 if (!s->monitor_source) {
344 pa_sink_unlink(s);
345 pa_sink_unref(s);
346 return NULL;
347 }
348
349 s->monitor_source->monitor_of = s;
350
351 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
352 pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
353 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
354
355 return s;
356 }
357
358 /* Called from main context */
359 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
360 int ret;
361 pa_bool_t suspend_change;
362 pa_sink_state_t original_state;
363
364 pa_assert(s);
365 pa_assert_ctl_context();
366
367 if (s->state == state)
368 return 0;
369
370 original_state = s->state;
371
372 suspend_change =
373 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
374 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
375
376 if (s->set_state)
377 if ((ret = s->set_state(s, state)) < 0)
378 return ret;
379
380 if (s->asyncmsgq)
381 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
382
383 if (s->set_state)
384 s->set_state(s, original_state);
385
386 return ret;
387 }
388
389 s->state = state;
390
391 if (state != PA_SINK_UNLINKED) { /* If we enter the UNLINKED state, pa_sink_unlink() will fire the appropriate events */
392 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
393 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
394 }
395
396 if (suspend_change) {
397 pa_sink_input *i;
398 uint32_t idx;
399
400 /* We're suspending or resuming, tell everyone about it */
401
402 PA_IDXSET_FOREACH(i, s->inputs, idx)
403 if (s->state == PA_SINK_SUSPENDED &&
404 (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
405 pa_sink_input_kill(i);
406 else if (i->suspend)
407 i->suspend(i, state == PA_SINK_SUSPENDED);
408
409 if (s->monitor_source)
410 pa_source_sync_suspend(s->monitor_source);
411 }
412
413 return 0;
414 }
415
416 /* Called from main context */
417 void pa_sink_put(pa_sink* s) {
418 pa_sink_assert_ref(s);
419 pa_assert_ctl_context();
420
421 pa_assert(s->state == PA_SINK_INIT);
422
423 /* The following fields must be initialized properly when calling _put() */
424 pa_assert(s->asyncmsgq);
425 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
426
427 /* Generally, flags should be initialized via pa_sink_new(). As a
428 * special exception we allow volume related flags to be set
429 * between _new() and _put(). */
430
431 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
432 s->flags |= PA_SINK_DECIBEL_VOLUME;
433
434 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
435 s->flags |= PA_SINK_FLAT_VOLUME;
436
437 /* We assume that if the sink implementor changed the default
438 * volume he did so in real_volume, because that is the usual
439 * place where he is supposed to place his changes. */
440 s->reference_volume = s->real_volume;
441
442 s->thread_info.soft_volume = s->soft_volume;
443 s->thread_info.soft_muted = s->muted;
444
445 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
446 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
447 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
448 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
449 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
450
451 pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
452 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
453 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
454
455 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
456
457 pa_source_put(s->monitor_source);
458
459 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
460 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
461 }
462
463 /* Called from main context */
464 void pa_sink_unlink(pa_sink* s) {
465 pa_bool_t linked;
466 pa_sink_input *i, *j = NULL;
467
468 pa_assert(s);
469 pa_assert_ctl_context();
470
471 /* Please note that pa_sink_unlink() does more than simply
472 * reversing pa_sink_put(). It also undoes the registrations
473 * already done in pa_sink_new()! */
474
475 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
476 * may be called multiple times on the same sink without bad
477 * effects. */
478
479 linked = PA_SINK_IS_LINKED(s->state);
480
481 if (linked)
482 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
483
484 if (s->state != PA_SINK_UNLINKED)
485 pa_namereg_unregister(s->core, s->name);
486 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
487
488 if (s->card)
489 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
490
491 while ((i = pa_idxset_first(s->inputs, NULL))) {
492 pa_assert(i != j);
493 pa_sink_input_kill(i);
494 j = i;
495 }
496
497 if (linked)
498 sink_set_state(s, PA_SINK_UNLINKED);
499 else
500 s->state = PA_SINK_UNLINKED;
501
502 reset_callbacks(s);
503
504 if (s->monitor_source)
505 pa_source_unlink(s->monitor_source);
506
507 if (linked) {
508 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
509 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
510 }
511 }
512
513 /* Called from main context */
514 static void sink_free(pa_object *o) {
515 pa_sink *s = PA_SINK(o);
516 pa_sink_input *i;
517
518 pa_assert(s);
519 pa_assert_ctl_context();
520 pa_assert(pa_sink_refcnt(s) == 0);
521
522 if (PA_SINK_IS_LINKED(s->state))
523 pa_sink_unlink(s);
524
525 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
526
527 if (s->monitor_source) {
528 pa_source_unref(s->monitor_source);
529 s->monitor_source = NULL;
530 }
531
532 pa_idxset_free(s->inputs, NULL, NULL);
533
534 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
535 pa_sink_input_unref(i);
536
537 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
538
539 if (s->silence.memblock)
540 pa_memblock_unref(s->silence.memblock);
541
542 pa_xfree(s->name);
543 pa_xfree(s->driver);
544
545 if (s->proplist)
546 pa_proplist_free(s->proplist);
547
548 if (s->ports) {
549 pa_device_port *p;
550
551 while ((p = pa_hashmap_steal_first(s->ports)))
552 pa_device_port_free(p);
553
554 pa_hashmap_free(s->ports, NULL, NULL);
555 }
556
557 pa_xfree(s);
558 }
559
560 /* Called from main context, and not while the IO thread is active, please */
561 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
562 pa_sink_assert_ref(s);
563 pa_assert_ctl_context();
564
565 s->asyncmsgq = q;
566
567 if (s->monitor_source)
568 pa_source_set_asyncmsgq(s->monitor_source, q);
569 }
570
571 /* Called from main context, and not while the IO thread is active, please */
572 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
573 pa_sink_assert_ref(s);
574 pa_assert_ctl_context();
575
576 if (mask == 0)
577 return;
578
579 /* For now, allow only a minimal set of flags to be changed. */
580 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
581
582 s->flags = (s->flags & ~mask) | (value & mask);
583
584 pa_source_update_flags(s->monitor_source,
585 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
586 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
587 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
588 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
589 }
590
591 /* Called from IO context, or before _put() from main context */
592 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
593 pa_sink_assert_ref(s);
594 pa_sink_assert_io_context(s);
595
596 s->thread_info.rtpoll = p;
597
598 if (s->monitor_source)
599 pa_source_set_rtpoll(s->monitor_source, p);
600 }
601
602 /* Called from main context */
603 int pa_sink_update_status(pa_sink*s) {
604 pa_sink_assert_ref(s);
605 pa_assert_ctl_context();
606 pa_assert(PA_SINK_IS_LINKED(s->state));
607
608 if (s->state == PA_SINK_SUSPENDED)
609 return 0;
610
611 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
612 }
613
614 /* Called from main context */
615 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
616 pa_sink_assert_ref(s);
617 pa_assert_ctl_context();
618 pa_assert(PA_SINK_IS_LINKED(s->state));
619 pa_assert(cause != 0);
620
621 if (suspend) {
622 s->suspend_cause |= cause;
623 s->monitor_source->suspend_cause |= cause;
624 } else {
625 s->suspend_cause &= ~cause;
626 s->monitor_source->suspend_cause &= ~cause;
627 }
628
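/* If we are already in the intended state (suspended vs. not suspended) there is nothing to do */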
629 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
630 return 0;
631
632 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
633
634 if (s->suspend_cause)
635 return sink_set_state(s, PA_SINK_SUSPENDED);
636 else
637 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
638 }
639
640 /* Called from main context */
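/* Starts a move for every input of this sink and collects them in a queue, so
 * that they can later be reattached elsewhere with pa_sink_move_all_finish()
 * or given up on with pa_sink_move_all_fail(). */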
641 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
642 pa_sink_input *i, *n;
643 uint32_t idx;
644
645 pa_sink_assert_ref(s);
646 pa_assert_ctl_context();
647 pa_assert(PA_SINK_IS_LINKED(s->state));
648
649 if (!q)
650 q = pa_queue_new();
651
652 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
653 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
654
655 pa_sink_input_ref(i);
656
657 if (pa_sink_input_start_move(i) >= 0)
658 pa_queue_push(q, i);
659 else
660 pa_sink_input_unref(i);
661 }
662
663 return q;
664 }
665
666 /* Called from main context */
667 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
668 pa_sink_input *i;
669
670 pa_sink_assert_ref(s);
671 pa_assert_ctl_context();
672 pa_assert(PA_SINK_IS_LINKED(s->state));
673 pa_assert(q);
674
675 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
676 if (pa_sink_input_finish_move(i, s, save) < 0)
677 pa_sink_input_fail_move(i);
678
679 pa_sink_input_unref(i);
680 }
681
682 pa_queue_free(q, NULL, NULL);
683 }
684
685 /* Called from main context */
686 void pa_sink_move_all_fail(pa_queue *q) {
687 pa_sink_input *i;
688
689 pa_assert_ctl_context();
690 pa_assert(q);
691
692 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
693 pa_sink_input_fail_move(i);
694 pa_sink_input_unref(i);
695 }
696
697 pa_queue_free(q, NULL, NULL);
698 }
699
700 /* Called from IO thread context */
701 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
702 pa_sink_input *i;
703 void *state = NULL;
704
705 pa_sink_assert_ref(s);
706 pa_sink_assert_io_context(s);
707 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
708
709 /* If nobody requested this and this is not actually a real rewind,
710 * we can short-cut it. Note that this means that not every rewind
711 * request triggered upstream will be translated into an actual
712 * rewind! */
713 if (!s->thread_info.rewind_requested && nbytes <= 0)
714 return;
715
716 s->thread_info.rewind_nbytes = 0;
717 s->thread_info.rewind_requested = FALSE;
718
719 if (s->thread_info.state == PA_SINK_SUSPENDED)
720 return;
721
722 if (nbytes > 0)
723 pa_log_debug("Processing rewind...");
724
725 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
726 pa_sink_input_assert_ref(i);
727 pa_sink_input_process_rewind(i, nbytes);
728 }
729
730 if (nbytes > 0)
731 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
732 pa_source_process_rewind(s->monitor_source, nbytes);
733 }
734
735 /* Called from IO thread context */
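/* Peeks one chunk from each connected input (up to maxinfo entries), skipping
 * inputs that only deliver silence, and shrinks *length to the shortest peeked
 * chunk so that all collected chunks can be mixed over a common window. A
 * reference to each contributing input is kept in info[].userdata and dropped
 * again in inputs_drop(). */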
736 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
737 pa_sink_input *i;
738 unsigned n = 0;
739 void *state = NULL;
740 size_t mixlength = *length;
741
742 pa_sink_assert_ref(s);
743 pa_sink_assert_io_context(s);
744 pa_assert(info);
745
746 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
747 pa_sink_input_assert_ref(i);
748
749 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
750
751 if (mixlength == 0 || info->chunk.length < mixlength)
752 mixlength = info->chunk.length;
753
754 if (pa_memblock_is_silence(info->chunk.memblock)) {
755 pa_memblock_unref(info->chunk.memblock);
756 continue;
757 }
758
759 info->userdata = pa_sink_input_ref(i);
760
761 pa_assert(info->chunk.memblock);
762 pa_assert(info->chunk.length > 0);
763
764 info++;
765 n++;
766 maxinfo--;
767 }
768
769 if (mixlength > 0)
770 *length = mixlength;
771
772 return n;
773 }
774
775 /* Called from IO thread context */
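/* Advances every connected input by the number of bytes just rendered,
 * forwards per-input data to any direct outputs on the monitor source, posts
 * the mixed result to the monitor source and releases the references and
 * memblocks collected in fill_mix_info(). */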
776 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
777 pa_sink_input *i;
778 void *state = NULL;
779 unsigned p = 0;
780 unsigned n_unreffed = 0;
781
782 pa_sink_assert_ref(s);
783 pa_sink_assert_io_context(s);
784 pa_assert(result);
785 pa_assert(result->memblock);
786 pa_assert(result->length > 0);
787
788 /* We optimize for the case where the order of the inputs has not changed */
789
790 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
791 unsigned j;
792 pa_mix_info* m = NULL;
793
794 pa_sink_input_assert_ref(i);
795
796 /* Let's try to find the matching entry in the pa_mix_info array */
797 for (j = 0; j < n; j ++) {
798
799 if (info[p].userdata == i) {
800 m = info + p;
801 break;
802 }
803
804 p++;
805 if (p >= n)
806 p = 0;
807 }
808
809 /* Drop read data */
810 pa_sink_input_drop(i, result->length);
811
812 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
813
814 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
815 void *ostate = NULL;
816 pa_source_output *o;
817 pa_memchunk c;
818
819 if (m && m->chunk.memblock) {
820 c = m->chunk;
821 pa_memblock_ref(c.memblock);
822 pa_assert(result->length <= c.length);
823 c.length = result->length;
824
825 pa_memchunk_make_writable(&c, 0);
826 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
827 } else {
828 c = s->silence;
829 pa_memblock_ref(c.memblock);
830 pa_assert(result->length <= c.length);
831 c.length = result->length;
832 }
833
834 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
835 pa_source_output_assert_ref(o);
836 pa_assert(o->direct_on_input == i);
837 pa_source_post_direct(s->monitor_source, o, &c);
838 }
839
840 pa_memblock_unref(c.memblock);
841 }
842 }
843
844 if (m) {
845 if (m->chunk.memblock)
846 pa_memblock_unref(m->chunk.memblock);
847 pa_memchunk_reset(&m->chunk);
848
849 pa_sink_input_unref(m->userdata);
850 m->userdata = NULL;
851
852 n_unreffed += 1;
853 }
854 }
855
856 /* Now drop references to entries that are included in the
857 * pa_mix_info array but don't exist anymore */
858
859 if (n_unreffed < n) {
860 for (; n > 0; info++, n--) {
861 if (info->userdata)
862 pa_sink_input_unref(info->userdata);
863 if (info->chunk.memblock)
864 pa_memblock_unref(info->chunk.memblock);
865 }
866 }
867
868 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
869 pa_source_post(s->monitor_source, result);
870 }
871
872 /* Called from IO thread context */
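/* Renders up to "length" bytes into *result. Depending on the number of
 * contributing inputs this either hands out a reference to the silence cache
 * (no inputs), passes the single input's chunk through while applying soft
 * volume/mute as needed, or mixes all inputs into a freshly allocated
 * memblock. */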
873 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
874 pa_mix_info info[MAX_MIX_CHANNELS];
875 unsigned n;
876 size_t block_size_max;
877
878 pa_sink_assert_ref(s);
879 pa_sink_assert_io_context(s);
880 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
881 pa_assert(pa_frame_aligned(length, &s->sample_spec));
882 pa_assert(result);
883
884 pa_sink_ref(s);
885
886 pa_assert(!s->thread_info.rewind_requested);
887 pa_assert(s->thread_info.rewind_nbytes == 0);
888
889 if (s->thread_info.state == PA_SINK_SUSPENDED) {
890 result->memblock = pa_memblock_ref(s->silence.memblock);
891 result->index = s->silence.index;
892 result->length = PA_MIN(s->silence.length, length);
pa_sink_unref(s); /* balance the pa_sink_ref() above before the early return */
893 return;
894 }
895
896 if (length <= 0)
897 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
898
899 block_size_max = pa_mempool_block_size_max(s->core->mempool);
900 if (length > block_size_max)
901 length = pa_frame_align(block_size_max, &s->sample_spec);
902
903 pa_assert(length > 0);
904
905 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
906
907 if (n == 0) {
908
909 *result = s->silence;
910 pa_memblock_ref(result->memblock);
911
912 if (result->length > length)
913 result->length = length;
914
915 } else if (n == 1) {
916 pa_cvolume volume;
917
918 *result = info[0].chunk;
919 pa_memblock_ref(result->memblock);
920
921 if (result->length > length)
922 result->length = length;
923
924 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
925
926 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
927 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
928 pa_memblock_unref(result->memblock);
929 pa_silence_memchunk_get(&s->core->silence_cache,
930 s->core->mempool,
931 result,
932 &s->sample_spec,
933 result->length);
934 } else {
935 pa_memchunk_make_writable(result, 0);
936 pa_volume_memchunk(result, &s->sample_spec, &volume);
937 }
938 }
939 } else {
940 void *ptr;
941 result->memblock = pa_memblock_new(s->core->mempool, length);
942
943 ptr = pa_memblock_acquire(result->memblock);
944 result->length = pa_mix(info, n,
945 ptr, length,
946 &s->sample_spec,
947 &s->thread_info.soft_volume,
948 s->thread_info.soft_muted);
949 pa_memblock_release(result->memblock);
950
951 result->index = 0;
952 }
953
954 inputs_drop(s, info, n, result);
955
956 pa_sink_unref(s);
957 }
958
959 /* Called from IO thread context */
960 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
961 pa_mix_info info[MAX_MIX_CHANNELS];
962 unsigned n;
963 size_t length, block_size_max;
964
965 pa_sink_assert_ref(s);
966 pa_sink_assert_io_context(s);
967 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
968 pa_assert(target);
969 pa_assert(target->memblock);
970 pa_assert(target->length > 0);
971 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
972
973 pa_sink_ref(s);
974
975 pa_assert(!s->thread_info.rewind_requested);
976 pa_assert(s->thread_info.rewind_nbytes == 0);
977
978 if (s->thread_info.state == PA_SINK_SUSPENDED) {
979 pa_silence_memchunk(target, &s->sample_spec);
pa_sink_unref(s); /* balance the pa_sink_ref() above before the early return */
980 return;
981 }
982
983 length = target->length;
984 block_size_max = pa_mempool_block_size_max(s->core->mempool);
985 if (length > block_size_max)
986 length = pa_frame_align(block_size_max, &s->sample_spec);
987
988 pa_assert(length > 0);
989
990 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
991
992 if (n == 0) {
993 if (target->length > length)
994 target->length = length;
995
996 pa_silence_memchunk(target, &s->sample_spec);
997 } else if (n == 1) {
998 pa_cvolume volume;
999
1000 if (target->length > length)
1001 target->length = length;
1002
1003 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1004
1005 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1006 pa_silence_memchunk(target, &s->sample_spec);
1007 else {
1008 pa_memchunk vchunk;
1009
1010 vchunk = info[0].chunk;
1011 pa_memblock_ref(vchunk.memblock);
1012
1013 if (vchunk.length > length)
1014 vchunk.length = length;
1015
1016 if (!pa_cvolume_is_norm(&volume)) {
1017 pa_memchunk_make_writable(&vchunk, 0);
1018 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1019 }
1020
1021 pa_memchunk_memcpy(target, &vchunk);
1022 pa_memblock_unref(vchunk.memblock);
1023 }
1024
1025 } else {
1026 void *ptr;
1027
1028 ptr = pa_memblock_acquire(target->memblock);
1029
1030 target->length = pa_mix(info, n,
1031 (uint8_t*) ptr + target->index, length,
1032 &s->sample_spec,
1033 &s->thread_info.soft_volume,
1034 s->thread_info.soft_muted);
1035
1036 pa_memblock_release(target->memblock);
1037 }
1038
1039 inputs_drop(s, info, n, target);
1040
1041 pa_sink_unref(s);
1042 }
1043
1044 /* Called from IO thread context */
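/* Completely fills the passed memchunk by calling pa_sink_render_into()
 * repeatedly, since a single pass may render less than was asked for. */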
1045 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1046 pa_memchunk chunk;
1047 size_t l, d;
1048
1049 pa_sink_assert_ref(s);
1050 pa_sink_assert_io_context(s);
1051 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1052 pa_assert(target);
1053 pa_assert(target->memblock);
1054 pa_assert(target->length > 0);
1055 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1056
1057 pa_sink_ref(s);
1058
1059 pa_assert(!s->thread_info.rewind_requested);
1060 pa_assert(s->thread_info.rewind_nbytes == 0);
1061
1062 l = target->length;
1063 d = 0;
1064 while (l > 0) {
1065 chunk = *target;
1066 chunk.index += d;
1067 chunk.length -= d;
1068
1069 pa_sink_render_into(s, &chunk);
1070
1071 d += chunk.length;
1072 l -= chunk.length;
1073 }
1074
1075 pa_sink_unref(s);
1076 }
1077
1078 /* Called from IO thread context */
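/* Like pa_sink_render(), but always returns exactly "length" bytes: if the
 * first mixing pass comes up short, the remainder is filled in with
 * pa_sink_render_into() on a writable copy of the result. */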
1079 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1080 pa_mix_info info[MAX_MIX_CHANNELS];
1081 size_t length1st = length;
1082 unsigned n;
1083
1084 pa_sink_assert_ref(s);
1085 pa_sink_assert_io_context(s);
1086 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1087 pa_assert(length > 0);
1088 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1089 pa_assert(result);
1090
1091 pa_sink_ref(s);
1092
1093 pa_assert(!s->thread_info.rewind_requested);
1094 pa_assert(s->thread_info.rewind_nbytes == 0);
1095
1096 pa_assert(length > 0);
1097
1098 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
1099
1100 if (n == 0) {
1101 pa_silence_memchunk_get(&s->core->silence_cache,
1102 s->core->mempool,
1103 result,
1104 &s->sample_spec,
1105 length1st);
1106 } else if (n == 1) {
1107 pa_cvolume volume;
1108
1109 *result = info[0].chunk;
1110 pa_memblock_ref(result->memblock);
1111
1112 if (result->length > length)
1113 result->length = length;
1114
1115 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1116
1117 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
1118 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1119 pa_memblock_unref(result->memblock);
1120 pa_silence_memchunk_get(&s->core->silence_cache,
1121 s->core->mempool,
1122 result,
1123 &s->sample_spec,
1124 result->length);
1125 } else {
1126 pa_memchunk_make_writable(result, length);
1127 pa_volume_memchunk(result, &s->sample_spec, &volume);
1128 }
1129 }
1130 } else {
1131 void *ptr;
1132
1133 result->index = 0;
1134 result->memblock = pa_memblock_new(s->core->mempool, length);
1135
1136 ptr = pa_memblock_acquire(result->memblock);
1137
1138 result->length = pa_mix(info, n,
1139 (uint8_t*) ptr + result->index, length1st,
1140 &s->sample_spec,
1141 &s->thread_info.soft_volume,
1142 s->thread_info.soft_muted);
1143
1144 pa_memblock_release(result->memblock);
1145 }
1146
1147 inputs_drop(s, info, n, result);
1148
1149 if (result->length < length) {
1150 pa_memchunk chunk;
1151 size_t l, d;
1152 pa_memchunk_make_writable(result, length);
1153
1154 l = length - result->length;
1155 d = result->index + result->length;
1156 while (l > 0) {
1157 chunk = *result;
1158 chunk.index = d;
1159 chunk.length = l;
1160
1161 pa_sink_render_into(s, &chunk);
1162
1163 d += chunk.length;
1164 l -= chunk.length;
1165 }
1166 result->length = length;
1167 }
1168
1169 pa_sink_unref(s);
1170 }
1171
1172 /* Called from main thread */
1173 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1174 pa_usec_t usec = 0;
1175
1176 pa_sink_assert_ref(s);
1177 pa_assert_ctl_context();
1178 pa_assert(PA_SINK_IS_LINKED(s->state));
1179
1180 /* The returned value is supposed to be in the time domain of the sound card! */
1181
1182 if (s->state == PA_SINK_SUSPENDED)
1183 return 0;
1184
1185 if (!(s->flags & PA_SINK_LATENCY))
1186 return 0;
1187
1188 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1189
1190 return usec;
1191 }
1192
1193 /* Called from IO thread */
1194 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1195 pa_usec_t usec = 0;
1196 pa_msgobject *o;
1197
1198 pa_sink_assert_ref(s);
1199 pa_sink_assert_io_context(s);
1200 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1201
1202 /* The returned value is supposed to be in the time domain of the sound card! */
1203
1204 if (s->thread_info.state == PA_SINK_SUSPENDED)
1205 return 0;
1206
1207 if (!(s->flags & PA_SINK_LATENCY))
1208 return 0;
1209
1210 o = PA_MSGOBJECT(s);
1211
1212 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1213
1214 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1215 return -1;
1216
1217 return usec;
1218 }
1219
1220 /* Called from main context */
1221 static void compute_reference_ratios(pa_sink *s) {
1222 uint32_t idx;
1223 pa_sink_input *i;
1224
1225 pa_sink_assert_ref(s);
1226 pa_assert_ctl_context();
1227 pa_assert(PA_SINK_IS_LINKED(s->state));
1228 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1229
1230 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1231 unsigned c;
1232 pa_cvolume remapped;
1233
1234 /*
1235 * Calculates the reference ratio from the sink's reference
1236 * volume. This basically calculates:
1237 *
1238 * i->reference_ratio = i->volume / s->reference_volume
1239 */
1240
1241 remapped = s->reference_volume;
1242 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1243
1244 i->reference_ratio.channels = i->sample_spec.channels;
1245
1246 for (c = 0; c < i->sample_spec.channels; c++) {
1247
1248 /* We don't update when the sink volume is 0 anyway */
1249 if (remapped.values[c] <= PA_VOLUME_MUTED)
1250 continue;
1251
1252 /* Don't update the reference ratio unless necessary */
1253 if (pa_sw_volume_multiply(
1254 i->reference_ratio.values[c],
1255 remapped.values[c]) == i->volume.values[c])
1256 continue;
1257
1258 i->reference_ratio.values[c] = pa_sw_volume_divide(
1259 i->volume.values[c],
1260 remapped.values[c]);
1261 }
1262 }
1263 }
1264
1265 /* Called from main context */
1266 static void compute_real_ratios(pa_sink *s) {
1267 pa_sink_input *i;
1268 uint32_t idx;
1269
1270 pa_sink_assert_ref(s);
1271 pa_assert_ctl_context();
1272 pa_assert(PA_SINK_IS_LINKED(s->state));
1273 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1274
1275 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1276 unsigned c;
1277 pa_cvolume remapped;
1278
1279 /*
1280 * This basically calculates:
1281 *
1282 * i->real_ratio := i->volume / s->real_volume
1283 * i->soft_volume := i->real_ratio * i->volume_factor
1284 */
1285
1286 remapped = s->real_volume;
1287 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1288
1289 i->real_ratio.channels = i->sample_spec.channels;
1290 i->soft_volume.channels = i->sample_spec.channels;
1291
1292 for (c = 0; c < i->sample_spec.channels; c++) {
1293
1294 if (remapped.values[c] <= PA_VOLUME_MUTED) {
1295 /* We leave i->real_ratio untouched */
1296 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1297 continue;
1298 }
1299
1300 /* Don't lose accuracy unless necessary */
1301 if (pa_sw_volume_multiply(
1302 i->real_ratio.values[c],
1303 remapped.values[c]) != i->volume.values[c])
1304
1305 i->real_ratio.values[c] = pa_sw_volume_divide(
1306 i->volume.values[c],
1307 remapped.values[c]);
1308
1309 i->soft_volume.values[c] = pa_sw_volume_multiply(
1310 i->real_ratio.values[c],
1311 i->volume_factor.values[c]);
1312 }
1313
1314 /* We don't copy the soft_volume to the thread_info data
1315 * here. That must be done by the caller */
1316 }
1317 }
1318
1319 /* Called from main thread */
1320 static void compute_real_volume(pa_sink *s) {
1321 pa_sink_input *i;
1322 uint32_t idx;
1323
1324 pa_sink_assert_ref(s);
1325 pa_assert_ctl_context();
1326 pa_assert(PA_SINK_IS_LINKED(s->state));
1327 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1328
1329 /* This determines the maximum volume of all streams and sets
1330 * s->real_volume accordingly. */
1331
1332 if (pa_idxset_isempty(s->inputs)) {
1333 /* In the special case that we have no sink input we leave the
1334 * volume unmodified. */
1335 s->real_volume = s->reference_volume;
1336 return;
1337 }
1338
1339 pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
1340
1341 /* First let's determine the new maximum volume of all inputs
1342 * connected to this sink */
1343 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1344 pa_cvolume remapped;
1345
1346 remapped = i->volume;
1347 pa_cvolume_remap(&remapped, &i->channel_map, &s->channel_map);
1348 pa_cvolume_merge(&s->real_volume, &s->real_volume, &remapped);
1349 }
1350
1351 /* Then, let's update the real ratios/soft volumes of all inputs
1352 * connected to this sink */
1353 compute_real_ratios(s);
1354 }
1355
1356 /* Called from main thread */
1357 static void propagate_reference_volume(pa_sink *s) {
1358 pa_sink_input *i;
1359 uint32_t idx;
1360
1361 pa_sink_assert_ref(s);
1362 pa_assert_ctl_context();
1363 pa_assert(PA_SINK_IS_LINKED(s->state));
1364 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1365
1366 /* This is called whenever the sink volume changes for a reason
1367 * other than a sink input volume change. We need to fix up the
1368 * sink input volumes accordingly */
1369
1370 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1371 pa_cvolume old_volume, remapped;
1372
1373 old_volume = i->volume;
1374
1375 /* This basically calculates:
1376 *
1377 * i->volume := s->reference_volume * i->reference_ratio */
1378
1379 remapped = s->reference_volume;
1380 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1381 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1382
1383 /* The volume changed, let's tell people so */
1384 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1385
1386 if (i->volume_changed)
1387 i->volume_changed(i);
1388
1389 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1390 }
1391 }
1392 }
1393
1394 /* Called from main thread */
1395 void pa_sink_set_volume(
1396 pa_sink *s,
1397 const pa_cvolume *volume,
1398 pa_bool_t sendmsg,
1399 pa_bool_t save) {
1400
1401 pa_cvolume old_reference_volume;
1402 pa_bool_t reference_changed;
1403
1404 pa_sink_assert_ref(s);
1405 pa_assert_ctl_context();
1406 pa_assert(PA_SINK_IS_LINKED(s->state));
1407 pa_assert(!volume || pa_cvolume_valid(volume));
1408 pa_assert(!volume || pa_cvolume_compatible(volume, &s->sample_spec));
1409 pa_assert(volume || (s->flags & PA_SINK_FLAT_VOLUME));
1410
1411 /* If volume is NULL we synchronize the sink's real and reference
1412 * volumes with the stream volumes. If it is not NULL we update
1413 * the reference_volume with it. */
1414
1415 old_reference_volume = s->reference_volume;
1416
1417 if (volume) {
1418
1419 s->reference_volume = *volume;
1420
1421 if (s->flags & PA_SINK_FLAT_VOLUME) {
1422 /* OK, propagate this volume change back to the inputs */
1423 propagate_reference_volume(s);
1424
1425 /* And now recalculate the real volume */
1426 compute_real_volume(s);
1427 } else
1428 s->real_volume = s->reference_volume;
1429
1430 } else {
1431 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1432
1433 /* Ok, let's determine the new real volume */
1434 compute_real_volume(s);
1435
1436 /* Let's 'push' the reference volume if necessary */
1437 pa_cvolume_merge(&s->reference_volume, &s->reference_volume, &s->real_volume);
1438
1439 /* We need to fix the reference ratios of all streams now that
1440 * we changed the reference volume */
1441 compute_reference_ratios(s);
1442 }
1443
1444 reference_changed = !pa_cvolume_equal(&old_reference_volume, &s->reference_volume);
1445 s->save_volume = (!reference_changed && s->save_volume) || save;
1446
1447 if (s->set_volume) {
1448 /* If we have a function set_volume(), then we do not apply a
1449 * soft volume by default. However, set_volume() is free to
1450 * apply one to s->soft_volume */
1451
1452 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1453 s->set_volume(s);
1454
1455 } else
1456 /* If we have no function set_volume(), then the soft volume
1457 * becomes the virtual volume */
1458 s->soft_volume = s->real_volume;
1459
1460 /* This tells the sink that soft and/or virtual volume changed */
1461 if (sendmsg)
1462 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1463
1464 if (reference_changed)
1465 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1466 }
1467
1468 /* Called from main thread. Only to be called by sink implementor */
1469 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1470 pa_sink_assert_ref(s);
1471 pa_assert_ctl_context();
1472
1473 if (!volume)
1474 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1475 else
1476 s->soft_volume = *volume;
1477
1478 if (PA_SINK_IS_LINKED(s->state))
1479 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1480 else
1481 s->thread_info.soft_volume = s->soft_volume;
1482 }
1483
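/* Called from main thread */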
1484 static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
1485 pa_sink_input *i;
1486 uint32_t idx;
1487 pa_cvolume old_reference_volume;
1488
1489 pa_sink_assert_ref(s);
1490 pa_assert_ctl_context();
1491 pa_assert(PA_SINK_IS_LINKED(s->state));
1492
1493 /* This is called when the hardware's real volume changes due to
1494 * some external event. We copy the real volume into our
1495 * reference volume and then rebuild the stream volumes based on
1496 * i->real_ratio which should stay fixed. */
1497
1498 if (pa_cvolume_equal(old_real_volume, &s->real_volume))
1499 return;
1500
1501 old_reference_volume = s->reference_volume;
1502
1503 /* 1. Make the real volume the reference volume */
1504 s->reference_volume = s->real_volume;
1505
1506 if (s->flags & PA_SINK_FLAT_VOLUME) {
1507
1508 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1509 pa_cvolume old_volume, remapped;
1510
1511 old_volume = i->volume;
1512
1513 /* 2. Since the sink's reference and real volumes are equal
1514 * now our ratios should be too. */
1515 i->reference_ratio = i->real_ratio;
1516
1517 /* 3. Recalculate the new stream reference volume based on the
1518 * reference ratio and the sink's reference volume.
1519 *
1520 * This basically calculates:
1521 *
1522 * i->volume = s->reference_volume * i->reference_ratio
1523 *
1524 * This is identical to propagate_reference_volume() */
1525 remapped = s->reference_volume;
1526 pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
1527 pa_sw_cvolume_multiply(&i->volume, &remapped, &i->reference_ratio);
1528
1529 /* Notify if something changed */
1530 if (!pa_cvolume_equal(&old_volume, &i->volume)) {
1531
1532 if (i->volume_changed)
1533 i->volume_changed(i);
1534
1535 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1536 }
1537 }
1538 }
1539
1540 /* Something got changed in the hardware. It probably makes sense
1541 * to save changed hw settings given that hw volume changes not
1542 * triggered by PA are almost certainly done by the user. */
1543 s->save_volume = TRUE;
1544
1545 if (!pa_cvolume_equal(&old_reference_volume, &s->reference_volume))
1546 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1547 }
1548
1549 /* Called from main thread */
1550 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
1551 pa_sink_assert_ref(s);
1552 pa_assert_ctl_context();
1553 pa_assert(PA_SINK_IS_LINKED(s->state));
1554
1555 if (s->refresh_volume || force_refresh) {
1556 struct pa_cvolume old_real_volume;
1557
1558 old_real_volume = s->real_volume;
1559
1560 if (s->get_volume)
1561 s->get_volume(s);
1562
1563 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1564
1565 propagate_real_volume(s, &old_real_volume);
1566 }
1567
1568 return &s->reference_volume;
1569 }
1570
1571 /* Called from main thread */
1572 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
1573 pa_cvolume old_real_volume;
1574
1575 pa_sink_assert_ref(s);
1576 pa_assert_ctl_context();
1577 pa_assert(PA_SINK_IS_LINKED(s->state));
1578
1579 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1580
1581 old_real_volume = s->real_volume;
1582 s->real_volume = *new_real_volume;
1583
1584 propagate_real_volume(s, &old_real_volume);
1585 }
1586
1587 /* Called from main thread */
1588 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1589 pa_bool_t old_muted;
1590
1591 pa_sink_assert_ref(s);
1592 pa_assert_ctl_context();
1593 pa_assert(PA_SINK_IS_LINKED(s->state));
1594
1595 old_muted = s->muted;
1596 s->muted = mute;
1597 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1598
1599 if (s->set_mute)
1600 s->set_mute(s);
1601
1602 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1603
1604 if (old_muted != s->muted)
1605 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1606 }
1607
1608 /* Called from main thread */
1609 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1610
1611 pa_sink_assert_ref(s);
1612 pa_assert_ctl_context();
1613 pa_assert(PA_SINK_IS_LINKED(s->state));
1614
1615 if (s->refresh_muted || force_refresh) {
1616 pa_bool_t old_muted = s->muted;
1617
1618 if (s->get_mute)
1619 s->get_mute(s);
1620
1621 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1622
1623 if (old_muted != s->muted) {
1624 s->save_muted = TRUE;
1625
1626 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1627
1628 /* Make sure the soft mute status stays in sync */
1629 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1630 }
1631 }
1632
1633 return s->muted;
1634 }
1635
1636 /* Called from main thread */
1637 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1638 pa_sink_assert_ref(s);
1639 pa_assert_ctl_context();
1640 pa_assert(PA_SINK_IS_LINKED(s->state));
1641
1642 /* The sink implementor may call this if the mute status changed to make sure everyone is notified */
1643
1644 if (s->muted == new_muted)
1645 return;
1646
1647 s->muted = new_muted;
1648 s->save_muted = TRUE;
1649
1650 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1651 }
1652
1653 /* Called from main thread */
1654 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1655 pa_sink_assert_ref(s);
1656 pa_assert_ctl_context();
1657
1658 if (p)
1659 pa_proplist_update(s->proplist, mode, p);
1660
1661 if (PA_SINK_IS_LINKED(s->state)) {
1662 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1663 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1664 }
1665
1666 return TRUE;
1667 }
1668
1669 /* Called from main thread */
1670 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1671 void pa_sink_set_description(pa_sink *s, const char *description) {
1672 const char *old;
1673 pa_sink_assert_ref(s);
1674 pa_assert_ctl_context();
1675
1676 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1677 return;
1678
1679 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1680
1681 if (old && description && pa_streq(old, description))
1682 return;
1683
1684 if (description)
1685 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1686 else
1687 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1688
1689 if (s->monitor_source) {
1690 char *n;
1691
1692 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1693 pa_source_set_description(s->monitor_source, n);
1694 pa_xfree(n);
1695 }
1696
1697 if (PA_SINK_IS_LINKED(s->state)) {
1698 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1699 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1700 }
1701 }
1702
1703 /* Called from main thread */
1704 unsigned pa_sink_linked_by(pa_sink *s) {
1705 unsigned ret;
1706
1707 pa_sink_assert_ref(s);
1708 pa_assert_ctl_context();
1709 pa_assert(PA_SINK_IS_LINKED(s->state));
1710
1711 ret = pa_idxset_size(s->inputs);
1712
1713 /* We add in the number of streams connected to us here. Please
1714 * note the asymmetry to pa_sink_used_by()! */
1715
1716 if (s->monitor_source)
1717 ret += pa_source_linked_by(s->monitor_source);
1718
1719 return ret;
1720 }
1721
1722 /* Called from main thread */
1723 unsigned pa_sink_used_by(pa_sink *s) {
1724 unsigned ret;
1725
1726 pa_sink_assert_ref(s);
1727 pa_assert_ctl_context();
1728 pa_assert(PA_SINK_IS_LINKED(s->state));
1729
1730 ret = pa_idxset_size(s->inputs);
1731 pa_assert(ret >= s->n_corked);
1732
1733 /* Streams connected to our monitor source do not matter for
1734 * pa_sink_used_by()! */
1735
1736 return ret - s->n_corked;
1737 }
1738
1739 /* Called from main thread */
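/* Returns the number of streams that keep this sink from being auto-suspended:
 * all inputs that are neither corked nor flagged with
 * PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND, plus whatever the monitor source
 * reports via pa_source_check_suspend(). */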
1740 unsigned pa_sink_check_suspend(pa_sink *s) {
1741 unsigned ret;
1742 pa_sink_input *i;
1743 uint32_t idx;
1744
1745 pa_sink_assert_ref(s);
1746 pa_assert_ctl_context();
1747
1748 if (!PA_SINK_IS_LINKED(s->state))
1749 return 0;
1750
1751 ret = 0;
1752
1753 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1754 pa_sink_input_state_t st;
1755
1756 st = pa_sink_input_get_state(i);
1757 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1758
1759 if (st == PA_SINK_INPUT_CORKED)
1760 continue;
1761
1762 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1763 continue;
1764
1765 ret ++;
1766 }
1767
1768 if (s->monitor_source)
1769 ret += pa_source_check_suspend(s->monitor_source);
1770
1771 return ret;
1772 }
1773
1774 /* Called from the IO thread */
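/* Copies each input's updated soft volume into its thread_info data and
 * requests a rewind so that the new volume also takes effect for audio that
 * has already been rendered but not yet played. */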
1775 static void sync_input_volumes_within_thread(pa_sink *s) {
1776 pa_sink_input *i;
1777 void *state = NULL;
1778
1779 pa_sink_assert_ref(s);
1780 pa_sink_assert_io_context(s);
1781
1782 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1783 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1784 continue;
1785
1786 i->thread_info.soft_volume = i->soft_volume;
1787 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1788 }
1789 }
1790
1791 /* Called from IO thread, except when it is not */
1792 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1793 pa_sink *s = PA_SINK(o);
1794 pa_sink_assert_ref(s);
1795
1796 switch ((pa_sink_message_t) code) {
1797
1798 case PA_SINK_MESSAGE_ADD_INPUT: {
1799 pa_sink_input *i = PA_SINK_INPUT(userdata);
1800
1801 /* If you change anything here, make sure to change the
1802 * sink input handling a few lines down at
1803 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1804
1805 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1806
1807 /* Since the caller sleeps in pa_sink_input_put(), we can
1808 * safely access data outside of thread_info even though
1809 * it is mutable */
1810
1811 if ((i->thread_info.sync_prev = i->sync_prev)) {
1812 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1813 pa_assert(i->sync_prev->sync_next == i);
1814 i->thread_info.sync_prev->thread_info.sync_next = i;
1815 }
1816
1817 if ((i->thread_info.sync_next = i->sync_next)) {
1818 pa_assert(i->sink == i->thread_info.sync_next->sink);
1819 pa_assert(i->sync_next->sync_prev == i);
1820 i->thread_info.sync_next->thread_info.sync_prev = i;
1821 }
1822
1823 pa_assert(!i->thread_info.attached);
1824 i->thread_info.attached = TRUE;
1825
1826 if (i->attach)
1827 i->attach(i);
1828
1829 pa_sink_input_set_state_within_thread(i, i->state);
1830
1831 /* The requested latency of the sink input needs to be
1832 * fixed up and then configured on the sink */
1833
1834 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1835 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1836
1837 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1838 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1839
1840 /* We don't rewind here automatically. This is left to the
1841 * sink input implementor because some sink inputs need a
1842 * slow start, i.e. need some time to buffer client
1843 * samples before beginning streaming. */
1844
1845 /* In flat volume mode we need to update the volume as
1846 * well */
1847 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1848 }
1849
1850 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1851 pa_sink_input *i = PA_SINK_INPUT(userdata);
1852
1853 /* If you change anything here, make sure to change the
1854 * sink input handling a few lines down at
1855 * PA_SINK_MESSAGE_START_MOVE, too. */
1856
1857 if (i->detach)
1858 i->detach(i);
1859
1860 pa_sink_input_set_state_within_thread(i, i->state);
1861
1862 pa_assert(i->thread_info.attached);
1863 i->thread_info.attached = FALSE;
1864
1865 /* Since the caller sleeps in pa_sink_input_unlink(),
1866 * we can safely access data outside of thread_info even
1867 * though it is mutable */
1868
1869 pa_assert(!i->sync_prev);
1870 pa_assert(!i->sync_next);
1871
1872 if (i->thread_info.sync_prev) {
1873 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1874 i->thread_info.sync_prev = NULL;
1875 }
1876
1877 if (i->thread_info.sync_next) {
1878 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1879 i->thread_info.sync_next = NULL;
1880 }
1881
1882 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1883 pa_sink_input_unref(i);
1884
1885 pa_sink_invalidate_requested_latency(s, TRUE);
1886 pa_sink_request_rewind(s, (size_t) -1);
1887
1888 /* In flat volume mode we need to update the volume as
1889 * well */
1890 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1891 }
1892
1893 case PA_SINK_MESSAGE_START_MOVE: {
1894 pa_sink_input *i = PA_SINK_INPUT(userdata);
1895
1896 /* We don't support moving synchronized streams. */
1897 pa_assert(!i->sync_prev);
1898 pa_assert(!i->sync_next);
1899 pa_assert(!i->thread_info.sync_next);
1900 pa_assert(!i->thread_info.sync_prev);
1901
1902 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1903 pa_usec_t usec = 0;
1904 size_t sink_nbytes, total_nbytes;
1905
1906 /* Get the latency of the sink */
1907 usec = pa_sink_get_latency_within_thread(s);
1908 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1909 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1910
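                      /* Note: total_nbytes covers both what the sink still
                       * has queued for this stream (sink_nbytes) and what we
                       * have already rendered into our render_memblockq; the
                       * sink itself can only be rewound by sink_nbytes, the
                       * rest is rewritten on the input side, converted back
                       * to its own sample spec via the resampler. */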
1911 if (total_nbytes > 0) {
1912 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1913 i->thread_info.rewrite_flush = TRUE;
1914 pa_sink_input_process_rewind(i, sink_nbytes);
1915 }
1916 }
1917
1918 if (i->detach)
1919 i->detach(i);
1920
1921 pa_assert(i->thread_info.attached);
1922 i->thread_info.attached = FALSE;
1923
1924                  /* Let's remove the sink input... */
1925 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1926 pa_sink_input_unref(i);
1927
1928 pa_sink_invalidate_requested_latency(s, TRUE);
1929
1930 pa_log_debug("Requesting rewind due to started move");
1931 pa_sink_request_rewind(s, (size_t) -1);
1932
1933 /* In flat volume mode we need to update the volume as
1934 * well */
1935 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1936 }
1937
1938 case PA_SINK_MESSAGE_FINISH_MOVE: {
1939 pa_sink_input *i = PA_SINK_INPUT(userdata);
1940
1941 /* We don't support moving synchronized streams. */
1942 pa_assert(!i->sync_prev);
1943 pa_assert(!i->sync_next);
1944 pa_assert(!i->thread_info.sync_next);
1945 pa_assert(!i->thread_info.sync_prev);
1946
1947 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1948
1949 pa_assert(!i->thread_info.attached);
1950 i->thread_info.attached = TRUE;
1951
1952 if (i->attach)
1953 i->attach(i);
1954
1955 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1956 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1957
1958 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1959 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1960
1961 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1962 pa_usec_t usec = 0;
1963 size_t nbytes;
1964
1965 /* Get the latency of the sink */
1966 usec = pa_sink_get_latency_within_thread(s);
1967 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1968
1969 if (nbytes > 0)
1970 pa_sink_input_drop(i, nbytes);
1971
1972 pa_log_debug("Requesting rewind due to finished move");
1973 pa_sink_request_rewind(s, nbytes);
1974 }
1975
1976 /* In flat volume mode we need to update the volume as
1977 * well */
1978 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1979 }
1980
1981 case PA_SINK_MESSAGE_SET_VOLUME:
1982
1983 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1984 s->thread_info.soft_volume = s->soft_volume;
1985 pa_sink_request_rewind(s, (size_t) -1);
1986 }
1987
1988 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1989 return 0;
1990
1991 /* Fall through ... */
1992
1993 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1994 sync_input_volumes_within_thread(s);
1995 return 0;
1996
1997 case PA_SINK_MESSAGE_GET_VOLUME:
1998 return 0;
1999
2000 case PA_SINK_MESSAGE_SET_MUTE:
2001
2002 if (s->thread_info.soft_muted != s->muted) {
2003 s->thread_info.soft_muted = s->muted;
2004 pa_sink_request_rewind(s, (size_t) -1);
2005 }
2006
2007 return 0;
2008
2009 case PA_SINK_MESSAGE_GET_MUTE:
2010 return 0;
2011
2012 case PA_SINK_MESSAGE_SET_STATE: {
2013
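              /* suspend_change is true exactly when we transition into or
               * out of PA_SINK_SUSPENDED; only then do the inputs'
               * suspend_within_thread() callbacks below need to run. */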
2014 pa_bool_t suspend_change =
2015 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
2016 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
2017
2018 s->thread_info.state = PA_PTR_TO_UINT(userdata);
2019
2020 if (s->thread_info.state == PA_SINK_SUSPENDED) {
2021 s->thread_info.rewind_nbytes = 0;
2022 s->thread_info.rewind_requested = FALSE;
2023 }
2024
2025 if (suspend_change) {
2026 pa_sink_input *i;
2027 void *state = NULL;
2028
2029 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2030 if (i->suspend_within_thread)
2031 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
2032 }
2033
2034 return 0;
2035 }
2036
2037 case PA_SINK_MESSAGE_DETACH:
2038
2039 /* Detach all streams */
2040 pa_sink_detach_within_thread(s);
2041 return 0;
2042
2043 case PA_SINK_MESSAGE_ATTACH:
2044
2045 /* Reattach all streams */
2046 pa_sink_attach_within_thread(s);
2047 return 0;
2048
2049 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
2050
2051 pa_usec_t *usec = userdata;
2052 *usec = pa_sink_get_requested_latency_within_thread(s);
2053
2054              /* Yes, that's right: the IO thread sees -1 when no explicit
2055              * requested latency is configured, while the main thread
2056              * sees max_latency instead */
2057 if (*usec == (pa_usec_t) -1)
2058 *usec = s->thread_info.max_latency;
2059
2060 return 0;
2061 }
2062
2063 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
2064 pa_usec_t *r = userdata;
2065
2066 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
2067
2068 return 0;
2069 }
2070
2071 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
2072 pa_usec_t *r = userdata;
2073
2074 r[0] = s->thread_info.min_latency;
2075 r[1] = s->thread_info.max_latency;
2076
2077 return 0;
2078 }
2079
2080 case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
2081
2082 *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
2083 return 0;
2084
2085 case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
2086
2087 pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
2088 return 0;
2089
2090 case PA_SINK_MESSAGE_GET_MAX_REWIND:
2091
2092 *((size_t*) userdata) = s->thread_info.max_rewind;
2093 return 0;
2094
2095 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
2096
2097 *((size_t*) userdata) = s->thread_info.max_request;
2098 return 0;
2099
2100 case PA_SINK_MESSAGE_SET_MAX_REWIND:
2101
2102 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
2103 return 0;
2104
2105 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
2106
2107 pa_sink_set_max_request_within_thread(s, (size_t) offset);
2108 return 0;
2109
2110 case PA_SINK_MESSAGE_GET_LATENCY:
2111 case PA_SINK_MESSAGE_MAX:
2112 ;
2113 }
2114
2115 return -1;
2116 }
2117
2118 /* Called from main thread */
2119 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2120 pa_sink *sink;
2121 uint32_t idx;
2122 int ret = 0;
2123
2124 pa_core_assert_ref(c);
2125 pa_assert_ctl_context();
2126 pa_assert(cause != 0);
2127
2128 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2129 int r;
2130
2131 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2132 ret = r;
2133 }
2134
2135 return ret;
2136 }
2137
2138 /* Called from main thread */
2139 void pa_sink_detach(pa_sink *s) {
2140 pa_sink_assert_ref(s);
2141 pa_assert_ctl_context();
2142 pa_assert(PA_SINK_IS_LINKED(s->state));
2143
2144 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2145 }
2146
2147 /* Called from main thread */
2148 void pa_sink_attach(pa_sink *s) {
2149 pa_sink_assert_ref(s);
2150 pa_assert_ctl_context();
2151 pa_assert(PA_SINK_IS_LINKED(s->state));
2152
2153 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2154 }
2155
2156 /* Called from IO thread */
2157 void pa_sink_detach_within_thread(pa_sink *s) {
2158 pa_sink_input *i;
2159 void *state = NULL;
2160
2161 pa_sink_assert_ref(s);
2162 pa_sink_assert_io_context(s);
2163 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2164
2165 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2166 if (i->detach)
2167 i->detach(i);
2168
2169 if (s->monitor_source)
2170 pa_source_detach_within_thread(s->monitor_source);
2171 }
2172
2173 /* Called from IO thread */
2174 void pa_sink_attach_within_thread(pa_sink *s) {
2175 pa_sink_input *i;
2176 void *state = NULL;
2177
2178 pa_sink_assert_ref(s);
2179 pa_sink_assert_io_context(s);
2180 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2181
2182 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2183 if (i->attach)
2184 i->attach(i);
2185
2186 if (s->monitor_source)
2187 pa_source_attach_within_thread(s->monitor_source);
2188 }
2189
2190 /* Called from IO thread */
2191 void pa_sink_request_rewind(pa_sink *s, size_t nbytes) {
2192 pa_sink_assert_ref(s);
2193 pa_sink_assert_io_context(s);
2194 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2195
2196 if (s->thread_info.state == PA_SINK_SUSPENDED)
2197 return;
2198
2199 if (nbytes == (size_t) -1)
2200 nbytes = s->thread_info.max_rewind;
2201
2202 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2203
2204 if (s->thread_info.rewind_requested &&
2205 nbytes <= s->thread_info.rewind_nbytes)
2206 return;
2207
2208 s->thread_info.rewind_nbytes = nbytes;
2209 s->thread_info.rewind_requested = TRUE;
2210
2211 if (s->request_rewind)
2212 s->request_rewind(s);
2213 }
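
/* Hedged illustration (not part of this file): a driver's request_rewind
 * callback usually just notes how much it can actually take back and lets its
 * IO loop do the work, which in the end boils down to something like
 *
 *     static void sink_request_rewind_cb(pa_sink *s) {
 *         struct userdata *u = s->userdata;  // hypothetical driver state
 *         pa_sink_process_rewind(s, PA_MIN(s->thread_info.rewind_nbytes,
 *                                          u->bytes_in_flight));
 *     }
 *
 * pa_sink_process_rewind() then distributes the rewind to all inputs. */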
2214
2215 /* Called from IO thread */
2216 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2217 pa_usec_t result = (pa_usec_t) -1;
2218 pa_sink_input *i;
2219 void *state = NULL;
2220 pa_usec_t monitor_latency;
2221
2222 pa_sink_assert_ref(s);
2223 pa_sink_assert_io_context(s);
2224
2225 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2226 return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2227
2228 if (s->thread_info.requested_latency_valid)
2229 return s->thread_info.requested_latency;
2230
2231 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2232 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2233 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2234 result = i->thread_info.requested_sink_latency;
2235
2236 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2237
2238 if (monitor_latency != (pa_usec_t) -1 &&
2239 (result == (pa_usec_t) -1 || result > monitor_latency))
2240 result = monitor_latency;
2241
2242 if (result != (pa_usec_t) -1)
2243 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2244
2245 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2246 /* Only cache if properly initialized */
2247 s->thread_info.requested_latency = result;
2248 s->thread_info.requested_latency_valid = TRUE;
2249 }
2250
2251 return result;
2252 }
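
/* Hedged illustration: inside its IO loop a dynamic-latency driver would
 * typically turn the result into a buffering target, roughly
 *
 *     pa_usec_t block_usec;
 *     size_t target_nbytes;
 *
 *     block_usec = pa_sink_get_requested_latency_within_thread(s);
 *     if (block_usec == (pa_usec_t) -1)
 *         block_usec = s->thread_info.max_latency;
 *     target_nbytes = pa_usec_to_bytes(block_usec, &s->sample_spec);
 *
 * mirroring the -1 handling of PA_SINK_MESSAGE_GET_REQUESTED_LATENCY above. */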
2253
2254 /* Called from main thread */
2255 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2256 pa_usec_t usec = 0;
2257
2258 pa_sink_assert_ref(s);
2259 pa_assert_ctl_context();
2260 pa_assert(PA_SINK_IS_LINKED(s->state));
2261
2262 if (s->state == PA_SINK_SUSPENDED)
2263 return 0;
2264
2265 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2266 return usec;
2267 }
2268
2269 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2270 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2271 pa_sink_input *i;
2272 void *state = NULL;
2273
2274 pa_sink_assert_ref(s);
2275 pa_sink_assert_io_context(s);
2276
2277 if (max_rewind == s->thread_info.max_rewind)
2278 return;
2279
2280 s->thread_info.max_rewind = max_rewind;
2281
2282 if (PA_SINK_IS_LINKED(s->thread_info.state))
2283 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2284 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2285
2286 if (s->monitor_source)
2287 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2288 }
2289
2290 /* Called from main thread */
2291 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2292 pa_sink_assert_ref(s);
2293 pa_assert_ctl_context();
2294
2295 if (PA_SINK_IS_LINKED(s->state))
2296 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2297 else
2298 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2299 }
2300
2301 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2302 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2303 void *state = NULL;
2304
2305 pa_sink_assert_ref(s);
2306 pa_sink_assert_io_context(s);
2307
2308 if (max_request == s->thread_info.max_request)
2309 return;
2310
2311 s->thread_info.max_request = max_request;
2312
2313 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2314 pa_sink_input *i;
2315
2316 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2317 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2318 }
2319 }
2320
2321 /* Called from main thread */
2322 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2323 pa_sink_assert_ref(s);
2324 pa_assert_ctl_context();
2325
2326 if (PA_SINK_IS_LINKED(s->state))
2327 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2328 else
2329 pa_sink_set_max_request_within_thread(s, max_request);
2330 }
2331
2332 /* Called from IO thread */
2333 void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
2334 pa_sink_input *i;
2335 void *state = NULL;
2336
2337 pa_sink_assert_ref(s);
2338 pa_sink_assert_io_context(s);
2339
2340 if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
2341 s->thread_info.requested_latency_valid = FALSE;
2342 else if (dynamic)
2343 return;
2344
2345 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2346
2347 if (s->update_requested_latency)
2348 s->update_requested_latency(s);
2349
2350 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2351 if (i->update_sink_requested_latency)
2352 i->update_sink_requested_latency(i);
2353 }
2354 }
2355
2356 /* Called from main thread */
2357 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2358 pa_sink_assert_ref(s);
2359 pa_assert_ctl_context();
2360
2361 /* min_latency == 0: no limit
2362 * min_latency anything else: specified limit
2363 *
2364 * Similar for max_latency */
2365
2366 if (min_latency < ABSOLUTE_MIN_LATENCY)
2367 min_latency = ABSOLUTE_MIN_LATENCY;
2368
2369 if (max_latency <= 0 ||
2370 max_latency > ABSOLUTE_MAX_LATENCY)
2371 max_latency = ABSOLUTE_MAX_LATENCY;
2372
2373 pa_assert(min_latency <= max_latency);
2374
2375 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2376 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2377 max_latency == ABSOLUTE_MAX_LATENCY) ||
2378 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2379
2380 if (PA_SINK_IS_LINKED(s->state)) {
2381 pa_usec_t r[2];
2382
2383 r[0] = min_latency;
2384 r[1] = max_latency;
2385
2386 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2387 } else
2388 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2389 }
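
/* Hedged illustration: a driver that sets PA_SINK_DYNAMIC_LATENCY would
 * normally call this once around sink creation, e.g. with a limit derived
 * from a hypothetical hardware buffer size:
 *
 *     pa_sink_set_latency_range(s, 0, pa_bytes_to_usec(hwbuf_size, &s->sample_spec));
 *
 * where the 0 ("no lower limit") is clamped up to ABSOLUTE_MIN_LATENCY above. */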
2390
2391 /* Called from main thread */
2392 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2393 pa_sink_assert_ref(s);
2394 pa_assert_ctl_context();
2395 pa_assert(min_latency);
2396 pa_assert(max_latency);
2397
2398 if (PA_SINK_IS_LINKED(s->state)) {
2399 pa_usec_t r[2] = { 0, 0 };
2400
2401 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2402
2403 *min_latency = r[0];
2404 *max_latency = r[1];
2405 } else {
2406 *min_latency = s->thread_info.min_latency;
2407 *max_latency = s->thread_info.max_latency;
2408 }
2409 }
2410
2411 /* Called from IO thread */
2412 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2413 pa_sink_assert_ref(s);
2414 pa_sink_assert_io_context(s);
2415
2416 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2417 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2418 pa_assert(min_latency <= max_latency);
2419
2420 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2421 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2422 max_latency == ABSOLUTE_MAX_LATENCY) ||
2423 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2424
2425 if (s->thread_info.min_latency == min_latency &&
2426 s->thread_info.max_latency == max_latency)
2427 return;
2428
2429 s->thread_info.min_latency = min_latency;
2430 s->thread_info.max_latency = max_latency;
2431
2432 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2433 pa_sink_input *i;
2434 void *state = NULL;
2435
2436 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2437 if (i->update_sink_latency_range)
2438 i->update_sink_latency_range(i);
2439 }
2440
2441 pa_sink_invalidate_requested_latency(s, FALSE);
2442
2443 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2444 }
2445
2446 /* Called from main thread */
2447 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2448 pa_sink_assert_ref(s);
2449 pa_assert_ctl_context();
2450
2451 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2452 pa_assert(latency == 0);
2453 return;
2454 }
2455
2456 if (latency < ABSOLUTE_MIN_LATENCY)
2457 latency = ABSOLUTE_MIN_LATENCY;
2458
2459 if (latency > ABSOLUTE_MAX_LATENCY)
2460 latency = ABSOLUTE_MAX_LATENCY;
2461
2462 if (PA_SINK_IS_LINKED(s->state))
2463 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2464 else
2465 s->thread_info.fixed_latency = latency;
2466
2467 pa_source_set_fixed_latency(s->monitor_source, latency);
2468 }
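
/* Hedged illustration: a driver without PA_SINK_DYNAMIC_LATENCY reports a
 * single figure instead, e.g. (hwbuf_size again being hypothetical)
 *
 *     pa_sink_set_fixed_latency(s, pa_bytes_to_usec(hwbuf_size, &s->sample_spec));
 *
 * which is clamped to [ABSOLUTE_MIN_LATENCY, ABSOLUTE_MAX_LATENCY] above. */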
2469
2470 /* Called from main thread */
2471 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2472 pa_usec_t latency;
2473
2474 pa_sink_assert_ref(s);
2475 pa_assert_ctl_context();
2476
2477 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2478 return 0;
2479
2480 if (PA_SINK_IS_LINKED(s->state))
2481 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2482 else
2483 latency = s->thread_info.fixed_latency;
2484
2485 return latency;
2486 }
2487
2488 /* Called from IO thread */
2489 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
2490 pa_sink_assert_ref(s);
2491 pa_sink_assert_io_context(s);
2492
2493 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2494 pa_assert(latency == 0);
2495 return;
2496 }
2497
2498 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2499 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2500
2501 if (s->thread_info.fixed_latency == latency)
2502 return;
2503
2504 s->thread_info.fixed_latency = latency;
2505
2506 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2507 pa_sink_input *i;
2508 void *state = NULL;
2509
2510 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2511 if (i->update_sink_fixed_latency)
2512 i->update_sink_fixed_latency(i);
2513 }
2514
2515 pa_sink_invalidate_requested_latency(s, FALSE);
2516
2517 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
2518 }
2519
2520 /* Called from main context */
2521 size_t pa_sink_get_max_rewind(pa_sink *s) {
2522 size_t r;
2523 pa_sink_assert_ref(s);
2524 pa_assert_ctl_context();
2525
2526 if (!PA_SINK_IS_LINKED(s->state))
2527 return s->thread_info.max_rewind;
2528
2529 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2530
2531 return r;
2532 }
2533
2534 /* Called from main context */
2535 size_t pa_sink_get_max_request(pa_sink *s) {
2536 size_t r;
2537 pa_sink_assert_ref(s);
2538 pa_assert_ctl_context();
2539
2540 if (!PA_SINK_IS_LINKED(s->state))
2541 return s->thread_info.max_request;
2542
2543 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2544
2545 return r;
2546 }
2547
2548 /* Called from main context */
2549 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2550 pa_device_port *port;
2551
2552 pa_sink_assert_ref(s);
2553 pa_assert_ctl_context();
2554
2555 if (!s->set_port) {
2556 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2557 return -PA_ERR_NOTIMPLEMENTED;
2558 }
2559
2560 if (!s->ports)
2561 return -PA_ERR_NOENTITY;
2562
2563 if (!(port = pa_hashmap_get(s->ports, name)))
2564 return -PA_ERR_NOENTITY;
2565
2566 if (s->active_port == port) {
2567 s->save_port = s->save_port || save;
2568 return 0;
2569 }
2570
2571 if ((s->set_port(s, port)) < 0)
2572 return -PA_ERR_NOENTITY;
2573
2574 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2575
2576 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2577
2578 s->active_port = port;
2579 s->save_port = save;
2580
2581 return 0;
2582 }
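
/* Hedged note: this is the path taken by client port-switch requests (e.g.
 * issued over the native protocol by tools such as pactl); a backend's
 * set_port() callback is expected to return a negative value on failure,
 * which is reported to the caller as PA_ERR_NOENTITY here. */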
2583
2584 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2585 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2586
2587 pa_assert(p);
2588
2589 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2590 return TRUE;
2591
2592 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2593
2594 if (pa_streq(ff, "microphone"))
2595 t = "audio-input-microphone";
2596 else if (pa_streq(ff, "webcam"))
2597 t = "camera-web";
2598 else if (pa_streq(ff, "computer"))
2599 t = "computer";
2600 else if (pa_streq(ff, "handset"))
2601 t = "phone";
2602 else if (pa_streq(ff, "portable"))
2603 t = "multimedia-player";
2604 else if (pa_streq(ff, "tv"))
2605 t = "video-display";
2606
2607              /*
2608               * The following icons are not part of the icon naming spec;
2609               * see the discussion at
2610               *
2611               * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2612               */
2613 else if (pa_streq(ff, "headset"))
2614 t = "audio-headset";
2615 else if (pa_streq(ff, "headphone"))
2616 t = "audio-headphones";
2617 else if (pa_streq(ff, "speaker"))
2618 t = "audio-speakers";
2619 else if (pa_streq(ff, "hands-free"))
2620 t = "audio-handsfree";
2621 }
2622
2623 if (!t)
2624 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2625 if (pa_streq(c, "modem"))
2626 t = "modem";
2627
2628 if (!t) {
2629 if (is_sink)
2630 t = "audio-card";
2631 else
2632 t = "audio-input-microphone";
2633 }
2634
2635 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2636 if (strstr(profile, "analog"))
2637 s = "-analog";
2638 else if (strstr(profile, "iec958"))
2639 s = "-iec958";
2640 else if (strstr(profile, "hdmi"))
2641 s = "-hdmi";
2642 }
2643
2644 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2645
2646 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2647
2648 return TRUE;
2649 }
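
/* Worked example of the construction above: form factor "headphone", a
 * profile name containing "analog" and bus "usb" yield
 * PA_PROP_DEVICE_ICON_NAME = "audio-headphones-analog-usb". */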
2650
2651 pa_bool_t pa_device_init_description(pa_proplist *p) {
2652 const char *s, *d = NULL, *k;
2653 pa_assert(p);
2654
2655 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2656 return TRUE;
2657
2658 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2659 if (pa_streq(s, "internal"))
2660 d = _("Internal Audio");
2661
2662 if (!d)
2663 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2664 if (pa_streq(s, "modem"))
2665 d = _("Modem");
2666
2667 if (!d)
2668 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2669
2670 if (!d)
2671 return FALSE;
2672
2673 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2674
2675 if (d && k)
2676 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2677 else if (d)
2678 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2679
2680 return TRUE;
2681 }
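
/* Worked example: form factor "internal" combined with a profile description
 * of, say, "Analog Stereo" yields the description "Internal Audio Analog
 * Stereo" (the "Internal Audio" part is translatable). */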
2682
2683 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2684 const char *s;
2685 pa_assert(p);
2686
2687 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2688 return TRUE;
2689
2690 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2691 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2692 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2693 return TRUE;
2694 }
2695
2696 return FALSE;
2697 }