1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
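/* All three latency values above are pa_usec_t microsecond quantities:
 * ABSOLUTE_MIN_LATENCY is 0.5 ms, ABSOLUTE_MAX_LATENCY is 10 s and
 * DEFAULT_FIXED_LATENCY is 250 ms. */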
54
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 memset(data, 0, sizeof(*data));
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
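/* Illustrative only -- how a sink implementation typically drives the
 * pa_sink_new_data API above. The module code sketched here is not part of
 * this file and the variable names (m, ss) are assumptions:
 *
 *     pa_sink_new_data data;
 *     pa_sink *s;
 *
 *     pa_sink_new_data_init(&data);
 *     data.driver = __FILE__;
 *     data.module = m;
 *     pa_sink_new_data_set_name(&data, "example");
 *     pa_sink_new_data_set_sample_spec(&data, &ss);
 *     s = pa_sink_new(m->core, &data, PA_SINK_LATENCY);
 *     pa_sink_new_data_done(&data);
 */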
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
149
150 /* Called from main context */
151 static void reset_callbacks(pa_sink *s) {
152 pa_assert(s);
153
154 s->set_state = NULL;
155 s->get_volume = NULL;
156 s->set_volume = NULL;
157 s->get_mute = NULL;
158 s->set_mute = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
161 s->set_port = NULL;
162 }
163
164 /* Called from main context */
165 pa_sink* pa_sink_new(
166 pa_core *core,
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
169
170 pa_sink *s;
171 const char *name;
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
174 const char *dn;
175 char *pt;
176
177 pa_assert(core);
178 pa_assert(data);
179 pa_assert(data->name);
180
181 s = pa_msgobject_new(pa_sink);
182
183 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
184 pa_log_debug("Failed to register name %s.", data->name);
185 pa_xfree(s);
186 return NULL;
187 }
188
189 pa_sink_new_data_set_name(data, name);
190
191 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
192 pa_xfree(s);
193 pa_namereg_unregister(core, name);
194 return NULL;
195 }
196
197 /* FIXME, need to free s here on failure */
198
199 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
200 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
201
202 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
203
204 if (!data->channel_map_is_set)
205 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
206
207 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
208 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
209
210 if (!data->volume_is_set)
211 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
212
213 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
214 pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);
215
216 if (!data->muted_is_set)
217 data->muted = FALSE;
218
219 if (data->card)
220 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
221
222 pa_device_init_description(data->proplist);
223 pa_device_init_icon(data->proplist, TRUE);
224 pa_device_init_intended_roles(data->proplist);
225
226 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
227 pa_xfree(s);
228 pa_namereg_unregister(core, name);
229 return NULL;
230 }
231
232 s->parent.parent.free = sink_free;
233 s->parent.process_msg = pa_sink_process_msg;
234
235 s->core = core;
236 s->state = PA_SINK_INIT;
237 s->flags = flags;
238 s->suspend_cause = 0;
239 s->name = pa_xstrdup(name);
240 s->proplist = pa_proplist_copy(data->proplist);
241 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
242 s->module = data->module;
243 s->card = data->card;
244
245 s->sample_spec = data->sample_spec;
246 s->channel_map = data->channel_map;
247
248 s->inputs = pa_idxset_new(NULL, NULL);
249 s->n_corked = 0;
250
251 s->reference_volume = s->virtual_volume = data->volume;
252 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
253 s->base_volume = PA_VOLUME_NORM;
254 s->n_volume_steps = PA_VOLUME_NORM+1;
255 s->muted = data->muted;
256 s->refresh_volume = s->refresh_muted = FALSE;
257
258 s->fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
259
260 reset_callbacks(s);
261 s->userdata = NULL;
262
263 s->asyncmsgq = NULL;
264 s->rtpoll = NULL;
265
266 /* As a minor optimization we just steal the list instead of
267 * copying it here */
268 s->ports = data->ports;
269 data->ports = NULL;
270
271 s->active_port = NULL;
272 s->save_port = FALSE;
273
274 if (data->active_port && s->ports)
275 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
276 s->save_port = data->save_port;
277
278 if (!s->active_port && s->ports) {
279 void *state;
280 pa_device_port *p;
281
282 PA_HASHMAP_FOREACH(p, s->ports, state)
283 if (!s->active_port || p->priority > s->active_port->priority)
284 s->active_port = p;
285 }
286
287 s->save_volume = data->save_volume;
288 s->save_muted = data->save_muted;
289
290 pa_silence_memchunk_get(
291 &core->silence_cache,
292 core->mempool,
293 &s->silence,
294 &s->sample_spec,
295 0);
296
297 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
298 s->thread_info.soft_volume = s->soft_volume;
299 s->thread_info.soft_muted = s->muted;
300 s->thread_info.state = s->state;
301 s->thread_info.rewind_nbytes = 0;
302 s->thread_info.rewind_requested = FALSE;
303 s->thread_info.max_rewind = 0;
304 s->thread_info.max_request = 0;
305 s->thread_info.requested_latency_valid = FALSE;
306 s->thread_info.requested_latency = 0;
307 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
308 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
309
310 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
311
312 if (s->card)
313 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
314
315 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
316 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
317 s->index,
318 s->name,
319 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
320 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
321 pt);
322 pa_xfree(pt);
323
324 pa_source_new_data_init(&source_data);
325 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
326 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
327 source_data.name = pa_sprintf_malloc("%s.monitor", name);
328 source_data.driver = data->driver;
329 source_data.module = data->module;
330 source_data.card = data->card;
331
332 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
333 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
334 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
335
336 s->monitor_source = pa_source_new(core, &source_data,
337 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
338 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
339
340 pa_source_new_data_done(&source_data);
341
342 if (!s->monitor_source) {
343 pa_sink_unlink(s);
344 pa_sink_unref(s);
345 return NULL;
346 }
347
348 s->monitor_source->monitor_of = s;
349
350 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
351 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
352
353 return s;
354 }
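/* Note for sink implementors: pa_sink_new() only allocates and registers
 * the sink. The caller is still expected to set the asyncmsgq and rtpoll
 * via pa_sink_set_asyncmsgq()/pa_sink_set_rtpoll() and typically install
 * its callbacks before activating the sink with pa_sink_put(), which
 * asserts on these preconditions below. */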
355
356 /* Called from main context */
357 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
358 int ret;
359 pa_bool_t suspend_change;
360 pa_sink_state_t original_state;
361
362 pa_assert(s);
363
364 if (s->state == state)
365 return 0;
366
367 original_state = s->state;
368
369 suspend_change =
370 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
371 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
372
373 if (s->set_state)
374 if ((ret = s->set_state(s, state)) < 0)
375 return ret;
376
377 if (s->asyncmsgq)
378 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
379
380 if (s->set_state)
381 s->set_state(s, original_state);
382
383 return ret;
384 }
385
386 s->state = state;
387
388     if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
389 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
390 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
391 }
392
393 if (suspend_change) {
394 pa_sink_input *i;
395 uint32_t idx;
396
397 /* We're suspending or resuming, tell everyone about it */
398
399 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx)))
400 if (s->state == PA_SINK_SUSPENDED &&
401 (i->flags & PA_SINK_INPUT_FAIL_ON_SUSPEND))
402 pa_sink_input_kill(i);
403 else if (i->suspend)
404 i->suspend(i, state == PA_SINK_SUSPENDED);
405
406 if (s->monitor_source)
407 pa_source_sync_suspend(s->monitor_source);
408 }
409
410 return 0;
411 }
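/* Informal summary of the state flow handled above: a sink starts in
 * PA_SINK_INIT, is moved to PA_SINK_IDLE by pa_sink_put(), toggles between
 * PA_SINK_IDLE and PA_SINK_RUNNING depending on pa_sink_used_by(), may be
 * parked in PA_SINK_SUSPENDED by pa_sink_suspend(), and ends up in
 * PA_SINK_UNLINKED via pa_sink_unlink(). */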
412
413 /* Called from main context */
414 void pa_sink_put(pa_sink* s) {
415 pa_sink_assert_ref(s);
416
417 pa_assert(s->state == PA_SINK_INIT);
418
419 /* The following fields must be initialized properly when calling _put() */
420 pa_assert(s->asyncmsgq);
421 pa_assert(s->rtpoll);
422 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
423
424 /* Generally, flags should be initialized via pa_sink_new(). As a
425 * special exception we allow volume related flags to be set
426 * between _new() and _put(). */
427
428 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
429 s->flags |= PA_SINK_DECIBEL_VOLUME;
430
431 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
432 s->flags |= PA_SINK_FLAT_VOLUME;
433
434 s->thread_info.soft_volume = s->soft_volume;
435 s->thread_info.soft_muted = s->muted;
436
437 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
438 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
439 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->fixed_latency != 0));
440 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
441 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
442
443 pa_assert(s->monitor_source->fixed_latency == s->fixed_latency);
444 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
445 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
446
447 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
448
449 pa_source_put(s->monitor_source);
450
451 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
452 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
453 }
454
455 /* Called from main context */
456 void pa_sink_unlink(pa_sink* s) {
457 pa_bool_t linked;
458 pa_sink_input *i, *j = NULL;
459
460 pa_assert(s);
461
462 /* Please note that pa_sink_unlink() does more than simply
463 * reversing pa_sink_put(). It also undoes the registrations
464 * already done in pa_sink_new()! */
465
466 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
467 * may be called multiple times on the same sink without bad
468 * effects. */
469
470 linked = PA_SINK_IS_LINKED(s->state);
471
472 if (linked)
473 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
474
475 if (s->state != PA_SINK_UNLINKED)
476 pa_namereg_unregister(s->core, s->name);
477 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
478
479 if (s->card)
480 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
481
482 while ((i = pa_idxset_first(s->inputs, NULL))) {
483 pa_assert(i != j);
484 pa_sink_input_kill(i);
485 j = i;
486 }
487
488 if (linked)
489 sink_set_state(s, PA_SINK_UNLINKED);
490 else
491 s->state = PA_SINK_UNLINKED;
492
493 reset_callbacks(s);
494
495 if (s->monitor_source)
496 pa_source_unlink(s->monitor_source);
497
498 if (linked) {
499 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
500 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
501 }
502 }
503
504 /* Called from main context */
505 static void sink_free(pa_object *o) {
506 pa_sink *s = PA_SINK(o);
507 pa_sink_input *i;
508
509 pa_assert(s);
510 pa_assert(pa_sink_refcnt(s) == 0);
511
512 if (PA_SINK_IS_LINKED(s->state))
513 pa_sink_unlink(s);
514
515 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
516
517 if (s->monitor_source) {
518 pa_source_unref(s->monitor_source);
519 s->monitor_source = NULL;
520 }
521
522 pa_idxset_free(s->inputs, NULL, NULL);
523
524 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
525 pa_sink_input_unref(i);
526
527 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
528
529 if (s->silence.memblock)
530 pa_memblock_unref(s->silence.memblock);
531
532 pa_xfree(s->name);
533 pa_xfree(s->driver);
534
535 if (s->proplist)
536 pa_proplist_free(s->proplist);
537
538 if (s->ports) {
539 pa_device_port *p;
540
541 while ((p = pa_hashmap_steal_first(s->ports)))
542 pa_device_port_free(p);
543
544 pa_hashmap_free(s->ports, NULL, NULL);
545 }
546
547 pa_xfree(s);
548 }
549
550 /* Called from main context */
551 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
552 pa_sink_assert_ref(s);
553
554 s->asyncmsgq = q;
555
556 if (s->monitor_source)
557 pa_source_set_asyncmsgq(s->monitor_source, q);
558 }
559
560 /* Called from main context */
561 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
562 pa_sink_assert_ref(s);
563
564 s->rtpoll = p;
565
566 if (s->monitor_source)
567 pa_source_set_rtpoll(s->monitor_source, p);
568 }
569
570 /* Called from main context */
571 int pa_sink_update_status(pa_sink*s) {
572 pa_sink_assert_ref(s);
573 pa_assert(PA_SINK_IS_LINKED(s->state));
574
575 if (s->state == PA_SINK_SUSPENDED)
576 return 0;
577
578 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
579 }
580
581 /* Called from main context */
582 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
583 pa_sink_assert_ref(s);
584 pa_assert(PA_SINK_IS_LINKED(s->state));
585 pa_assert(cause != 0);
586
587 if (suspend) {
588 s->suspend_cause |= cause;
589 s->monitor_source->suspend_cause |= cause;
590 } else {
591 s->suspend_cause &= ~cause;
592 s->monitor_source->suspend_cause &= ~cause;
593 }
594
595 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
596 return 0;
597
598 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
599
600 if (s->suspend_cause)
601 return sink_set_state(s, PA_SINK_SUSPENDED);
602 else
603 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
604 }
605
606 /* Called from main context */
607 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
608 pa_sink_input *i, *n;
609 uint32_t idx;
610
611 pa_sink_assert_ref(s);
612 pa_assert(PA_SINK_IS_LINKED(s->state));
613
614 if (!q)
615 q = pa_queue_new();
616
617 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
618 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
619
620 pa_sink_input_ref(i);
621
622 if (pa_sink_input_start_move(i) >= 0)
623 pa_queue_push(q, i);
624 else
625 pa_sink_input_unref(i);
626 }
627
628 return q;
629 }
630
631 /* Called from main context */
632 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
633 pa_sink_input *i;
634
635 pa_sink_assert_ref(s);
636 pa_assert(PA_SINK_IS_LINKED(s->state));
637 pa_assert(q);
638
639 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
640 if (pa_sink_input_finish_move(i, s, save) < 0)
641 pa_sink_input_kill(i);
642
643 pa_sink_input_unref(i);
644 }
645
646 pa_queue_free(q, NULL, NULL);
647 }
648
649 /* Called from main context */
650 void pa_sink_move_all_fail(pa_queue *q) {
651 pa_sink_input *i;
652 pa_assert(q);
653
654 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
655 if (pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_MOVE_FAIL], i) == PA_HOOK_OK) {
656 pa_sink_input_kill(i);
657 pa_sink_input_unref(i);
658 }
659 }
660
661 pa_queue_free(q, NULL, NULL);
662 }
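/* The three functions above form a small protocol for moving every stream
 * off a sink, e.g. when switching devices. An illustrative caller (the
 * variable names are assumed, not taken from this file):
 *
 *     pa_queue *q = pa_sink_move_all_start(old_sink, NULL);
 *
 *     if (new_sink)
 *         pa_sink_move_all_finish(new_sink, q, FALSE);
 *     else
 *         pa_sink_move_all_fail(q);
 */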
663
664 /* Called from IO thread context */
665 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
666 pa_sink_input *i;
667 void *state = NULL;
668 pa_sink_assert_ref(s);
669 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
670
671     /* If nobody requested this and this is not actually a real rewind,
672      * then we can short-cut it here */
673 if (!s->thread_info.rewind_requested && nbytes <= 0)
674 return;
675
676 s->thread_info.rewind_nbytes = 0;
677 s->thread_info.rewind_requested = FALSE;
678
679 if (s->thread_info.state == PA_SINK_SUSPENDED)
680 return;
681
682 if (nbytes > 0)
683 pa_log_debug("Processing rewind...");
684
685 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
686 pa_sink_input_assert_ref(i);
687 pa_sink_input_process_rewind(i, nbytes);
688 }
689
690 if (nbytes > 0)
691 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
692 pa_source_process_rewind(s->monitor_source, nbytes);
693 }
694
695 /* Called from IO thread context */
696 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
697 pa_sink_input *i;
698 unsigned n = 0;
699 void *state = NULL;
700 size_t mixlength = *length;
701
702 pa_sink_assert_ref(s);
703 pa_assert(info);
704
705 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
706 pa_sink_input_assert_ref(i);
707
708 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
709
710 if (mixlength == 0 || info->chunk.length < mixlength)
711 mixlength = info->chunk.length;
712
713 if (pa_memblock_is_silence(info->chunk.memblock)) {
714 pa_memblock_unref(info->chunk.memblock);
715 continue;
716 }
717
718 info->userdata = pa_sink_input_ref(i);
719
720 pa_assert(info->chunk.memblock);
721 pa_assert(info->chunk.length > 0);
722
723 info++;
724 n++;
725 maxinfo--;
726 }
727
728 if (mixlength > 0)
729 *length = mixlength;
730
731 return n;
732 }
733
734 /* Called from IO thread context */
735 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
736 pa_sink_input *i;
737 void *state = NULL;
738 unsigned p = 0;
739 unsigned n_unreffed = 0;
740
741 pa_sink_assert_ref(s);
742 pa_assert(result);
743 pa_assert(result->memblock);
744 pa_assert(result->length > 0);
745
746 /* We optimize for the case where the order of the inputs has not changed */
747
748 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
749 unsigned j;
750 pa_mix_info* m = NULL;
751
752 pa_sink_input_assert_ref(i);
753
754         /* Let's try to find the matching entry in the pa_mix_info array */
755 for (j = 0; j < n; j ++) {
756
757 if (info[p].userdata == i) {
758 m = info + p;
759 break;
760 }
761
762 p++;
763 if (p >= n)
764 p = 0;
765 }
766
767 /* Drop read data */
768 pa_sink_input_drop(i, result->length);
769
770 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
771
772 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
773 void *ostate = NULL;
774 pa_source_output *o;
775 pa_memchunk c;
776
777 if (m && m->chunk.memblock) {
778 c = m->chunk;
779 pa_memblock_ref(c.memblock);
780 pa_assert(result->length <= c.length);
781 c.length = result->length;
782
783 pa_memchunk_make_writable(&c, 0);
784 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
785 } else {
786 c = s->silence;
787 pa_memblock_ref(c.memblock);
788 pa_assert(result->length <= c.length);
789 c.length = result->length;
790 }
791
792 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
793 pa_source_output_assert_ref(o);
794 pa_assert(o->direct_on_input == i);
795 pa_source_post_direct(s->monitor_source, o, &c);
796 }
797
798 pa_memblock_unref(c.memblock);
799 }
800 }
801
802 if (m) {
803 if (m->chunk.memblock)
804 pa_memblock_unref(m->chunk.memblock);
805 pa_memchunk_reset(&m->chunk);
806
807 pa_sink_input_unref(m->userdata);
808 m->userdata = NULL;
809
810 n_unreffed += 1;
811 }
812 }
813
814 /* Now drop references to entries that are included in the
815 * pa_mix_info array but don't exist anymore */
816
817 if (n_unreffed < n) {
818 for (; n > 0; info++, n--) {
819 if (info->userdata)
820 pa_sink_input_unref(info->userdata);
821 if (info->chunk.memblock)
822 pa_memblock_unref(info->chunk.memblock);
823 }
824 }
825
826 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
827 pa_source_post(s->monitor_source, result);
828 }
829
830 /* Called from IO thread context */
831 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
832 pa_mix_info info[MAX_MIX_CHANNELS];
833 unsigned n;
834 size_t block_size_max;
835
836 pa_sink_assert_ref(s);
837 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
838 pa_assert(pa_frame_aligned(length, &s->sample_spec));
839 pa_assert(result);
840
841 pa_sink_ref(s);
842
843 pa_assert(!s->thread_info.rewind_requested);
844 pa_assert(s->thread_info.rewind_nbytes == 0);
845
846 if (s->thread_info.state == PA_SINK_SUSPENDED) {
847 result->memblock = pa_memblock_ref(s->silence.memblock);
848 result->index = s->silence.index;
849 result->length = PA_MIN(s->silence.length, length);
850 return;
851 }
852
853 if (length <= 0)
854 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
855
856 block_size_max = pa_mempool_block_size_max(s->core->mempool);
857 if (length > block_size_max)
858 length = pa_frame_align(block_size_max, &s->sample_spec);
859
860 pa_assert(length > 0);
861
862 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
863
864 if (n == 0) {
865
866 *result = s->silence;
867 pa_memblock_ref(result->memblock);
868
869 if (result->length > length)
870 result->length = length;
871
872 } else if (n == 1) {
873 pa_cvolume volume;
874
875 *result = info[0].chunk;
876 pa_memblock_ref(result->memblock);
877
878 if (result->length > length)
879 result->length = length;
880
881 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
882
883 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
884 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
885 pa_memblock_unref(result->memblock);
886 pa_silence_memchunk_get(&s->core->silence_cache,
887 s->core->mempool,
888 result,
889 &s->sample_spec,
890 result->length);
891 } else {
892 pa_memchunk_make_writable(result, 0);
893 pa_volume_memchunk(result, &s->sample_spec, &volume);
894 }
895 }
896 } else {
897 void *ptr;
898 result->memblock = pa_memblock_new(s->core->mempool, length);
899
900 ptr = pa_memblock_acquire(result->memblock);
901 result->length = pa_mix(info, n,
902 ptr, length,
903 &s->sample_spec,
904 &s->thread_info.soft_volume,
905 s->thread_info.soft_muted);
906 pa_memblock_release(result->memblock);
907
908 result->index = 0;
909 }
910
911 inputs_drop(s, info, n, result);
912
913 pa_sink_unref(s);
914 }
915
916 /* Called from IO thread context */
917 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
918 pa_mix_info info[MAX_MIX_CHANNELS];
919 unsigned n;
920 size_t length, block_size_max;
921
922 pa_sink_assert_ref(s);
923 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
924 pa_assert(target);
925 pa_assert(target->memblock);
926 pa_assert(target->length > 0);
927 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
928
929 pa_sink_ref(s);
930
931 pa_assert(!s->thread_info.rewind_requested);
932 pa_assert(s->thread_info.rewind_nbytes == 0);
933
934 if (s->thread_info.state == PA_SINK_SUSPENDED) {
935 pa_silence_memchunk(target, &s->sample_spec);
936 return;
937 }
938
939 length = target->length;
940 block_size_max = pa_mempool_block_size_max(s->core->mempool);
941 if (length > block_size_max)
942 length = pa_frame_align(block_size_max, &s->sample_spec);
943
944 pa_assert(length > 0);
945
946 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
947
948 if (n == 0) {
949 if (target->length > length)
950 target->length = length;
951
952 pa_silence_memchunk(target, &s->sample_spec);
953 } else if (n == 1) {
954 pa_cvolume volume;
955
956 if (target->length > length)
957 target->length = length;
958
959 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
960
961 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
962 pa_silence_memchunk(target, &s->sample_spec);
963 else {
964 pa_memchunk vchunk;
965
966 vchunk = info[0].chunk;
967 pa_memblock_ref(vchunk.memblock);
968
969 if (vchunk.length > length)
970 vchunk.length = length;
971
972 if (!pa_cvolume_is_norm(&volume)) {
973 pa_memchunk_make_writable(&vchunk, 0);
974 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
975 }
976
977 pa_memchunk_memcpy(target, &vchunk);
978 pa_memblock_unref(vchunk.memblock);
979 }
980
981 } else {
982 void *ptr;
983
984 ptr = pa_memblock_acquire(target->memblock);
985
986 target->length = pa_mix(info, n,
987 (uint8_t*) ptr + target->index, length,
988 &s->sample_spec,
989 &s->thread_info.soft_volume,
990 s->thread_info.soft_muted);
991
992 pa_memblock_release(target->memblock);
993 }
994
995 inputs_drop(s, info, n, target);
996
997 pa_sink_unref(s);
998 }
999
1000 /* Called from IO thread context */
1001 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1002 pa_memchunk chunk;
1003 size_t l, d;
1004
1005 pa_sink_assert_ref(s);
1006 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1007 pa_assert(target);
1008 pa_assert(target->memblock);
1009 pa_assert(target->length > 0);
1010 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1011
1012 pa_sink_ref(s);
1013
1014 pa_assert(!s->thread_info.rewind_requested);
1015 pa_assert(s->thread_info.rewind_nbytes == 0);
1016
1017 l = target->length;
1018 d = 0;
1019 while (l > 0) {
1020 chunk = *target;
1021 chunk.index += d;
1022 chunk.length -= d;
1023
1024 pa_sink_render_into(s, &chunk);
1025
1026 d += chunk.length;
1027 l -= chunk.length;
1028 }
1029
1030 pa_sink_unref(s);
1031 }
1032
1033 /* Called from IO thread context */
1034 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1035 pa_mix_info info[MAX_MIX_CHANNELS];
1036 size_t length1st = length;
1037 unsigned n;
1038
1039 pa_sink_assert_ref(s);
1040 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1041 pa_assert(length > 0);
1042 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1043 pa_assert(result);
1044
1045 pa_sink_ref(s);
1046
1047 pa_assert(!s->thread_info.rewind_requested);
1048 pa_assert(s->thread_info.rewind_nbytes == 0);
1049
1050 pa_assert(length > 0);
1051
1052 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
1053
1054 if (n == 0) {
1055 pa_silence_memchunk_get(&s->core->silence_cache,
1056 s->core->mempool,
1057 result,
1058 &s->sample_spec,
1059 length1st);
1060 } else if (n == 1) {
1061 pa_cvolume volume;
1062
1063 *result = info[0].chunk;
1064 pa_memblock_ref(result->memblock);
1065
1066 if (result->length > length)
1067 result->length = length;
1068
1069 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1070
1071 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
1072 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1073 pa_memblock_unref(result->memblock);
1074 pa_silence_memchunk_get(&s->core->silence_cache,
1075 s->core->mempool,
1076 result,
1077 &s->sample_spec,
1078 result->length);
1079 } else {
1080 pa_memchunk_make_writable(result, length);
1081 pa_volume_memchunk(result, &s->sample_spec, &volume);
1082 }
1083 }
1084 } else {
1085 void *ptr;
1086
1087 result->index = 0;
1088 result->memblock = pa_memblock_new(s->core->mempool, length);
1089
1090 ptr = pa_memblock_acquire(result->memblock);
1091
1092 result->length = pa_mix(info, n,
1093 (uint8_t*) ptr + result->index, length1st,
1094 &s->sample_spec,
1095 &s->thread_info.soft_volume,
1096 s->thread_info.soft_muted);
1097
1098 pa_memblock_release(result->memblock);
1099 }
1100
1101 inputs_drop(s, info, n, result);
1102
1103 if (result->length < length) {
1104 pa_memchunk chunk;
1105 size_t l, d;
1106 pa_memchunk_make_writable(result, length);
1107
1108 l = length - result->length;
1109 d = result->index + result->length;
1110 while (l > 0) {
1111 chunk = *result;
1112 chunk.index = d;
1113 chunk.length = l;
1114
1115 pa_sink_render_into(s, &chunk);
1116
1117 d += chunk.length;
1118 l -= chunk.length;
1119 }
1120 result->length = length;
1121 }
1122
1123 pa_sink_unref(s);
1124 }
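/* Quick comparison of the render helpers above, as implemented here:
 * pa_sink_render() allocates a new chunk and may return fewer bytes than
 * requested; pa_sink_render_into() mixes into an existing chunk and may
 * shorten it; pa_sink_render_into_full() loops pa_sink_render_into() until
 * the target chunk is completely filled; and pa_sink_render_full()
 * guarantees exactly 'length' bytes in the result, topping it up with
 * additional render passes if the first mix came up short. */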
1125
1126 /* Called from main thread */
1127 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1128 pa_usec_t usec = 0;
1129
1130 pa_sink_assert_ref(s);
1131 pa_assert(PA_SINK_IS_LINKED(s->state));
1132
1133 /* The returned value is supposed to be in the time domain of the sound card! */
1134
1135 if (s->state == PA_SINK_SUSPENDED)
1136 return 0;
1137
1138 if (!(s->flags & PA_SINK_LATENCY))
1139 return 0;
1140
1141 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1142
1143 return usec;
1144 }
1145
1146 /* Called from IO thread */
1147 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1148 pa_usec_t usec = 0;
1149 pa_msgobject *o;
1150
1151 pa_sink_assert_ref(s);
1152 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1153
1154 /* The returned value is supposed to be in the time domain of the sound card! */
1155
1156 if (s->thread_info.state == PA_SINK_SUSPENDED)
1157 return 0;
1158
1159 if (!(s->flags & PA_SINK_LATENCY))
1160 return 0;
1161
1162 o = PA_MSGOBJECT(s);
1163
1164 /* We probably should make this a proper vtable callback instead of going through process_msg() */
1165
1166 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1167 return -1;
1168
1169 return usec;
1170 }
1171
1172 static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
1173 unsigned c;
1174
1175 pa_sink_input_assert_ref(i);
1176 pa_assert(new_volume->channels == i->sample_spec.channels);
1177
1178 /*
1179 * This basically calculates:
1180 *
1181 * i->relative_volume := i->virtual_volume / new_volume
1182 * i->soft_volume := i->relative_volume * i->volume_factor
1183 */
1184
1185 /* The new sink volume passed in here must already be remapped to
1186 * the sink input's channel map! */
1187
1188 i->soft_volume.channels = i->sample_spec.channels;
1189
1190 for (c = 0; c < i->sample_spec.channels; c++)
1191
1192 if (new_volume->values[c] <= PA_VOLUME_MUTED)
1193 /* We leave i->relative_volume untouched */
1194 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1195 else {
1196 i->relative_volume[c] =
1197 pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
1198 pa_sw_volume_to_linear(new_volume->values[c]);
1199
1200 i->soft_volume.values[c] = pa_sw_volume_from_linear(
1201 i->relative_volume[c] *
1202 pa_sw_volume_to_linear(i->volume_factor.values[c]));
1203 }
1204
1205 /* Hooks have the ability to play games with i->soft_volume */
1206 pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);
1207
1208 /* We don't copy the soft_volume to the thread_info data
1209 * here. That must be done by the caller */
1210 }
1211
1212 /* Called from main thread */
1213 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1214 pa_sink_input *i;
1215 uint32_t idx;
1216
1217 pa_sink_assert_ref(s);
1218 pa_assert(new_volume);
1219 pa_assert(PA_SINK_IS_LINKED(s->state));
1220 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1221
1222 /* This is called whenever a sink input volume changes or a sink
1223 * input is added/removed and we might need to fix up the sink
1224 * volume accordingly. Please note that we don't actually update
1225 * the sinks volume here, we only return how it needs to be
1226 * updated. The caller should then call pa_sink_set_volume().*/
1227
1228 if (pa_idxset_isempty(s->inputs)) {
1229 /* In the special case that we have no sink input we leave the
1230 * volume unmodified. */
1231 *new_volume = s->reference_volume;
1232 return;
1233 }
1234
1235 pa_cvolume_mute(new_volume, s->channel_map.channels);
1236
1237 /* First let's determine the new maximum volume of all inputs
1238 * connected to this sink */
1239 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1240 unsigned c;
1241 pa_cvolume remapped_volume;
1242
1243 remapped_volume = i->virtual_volume;
1244 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1245
1246 for (c = 0; c < new_volume->channels; c++)
1247 if (remapped_volume.values[c] > new_volume->values[c])
1248 new_volume->values[c] = remapped_volume.values[c];
1249 }
1250
1251 /* Then, let's update the soft volumes of all inputs connected
1252 * to this sink */
1253 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1254 pa_cvolume remapped_new_volume;
1255
1256 remapped_new_volume = *new_volume;
1257 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1258 compute_new_soft_volume(i, &remapped_new_volume);
1259
1260 /* We don't copy soft_volume to the thread_info data here
1261 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1262          * want the update to be atomic with the sink volume
1263 * update, hence we do it within the pa_sink_set_volume() call
1264 * below */
1265 }
1266 }
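/* Worked example for the flat volume logic above (hypothetical numbers,
 * single channel, volume_factor at unity): with two inputs whose virtual
 * volumes are 100% and 50%, the volume proposed for the sink is 100% (the
 * per-channel maximum); compute_new_soft_volume() then leaves the first
 * input unattenuated and gives the second an effective soft volume of 50%,
 * so the audible mix is unchanged while the device volume follows the
 * loudest stream. */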
1267
1268 /* Called from main thread */
1269 void pa_sink_propagate_flat_volume(pa_sink *s) {
1270 pa_sink_input *i;
1271 uint32_t idx;
1272
1273 pa_sink_assert_ref(s);
1274 pa_assert(PA_SINK_IS_LINKED(s->state));
1275 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1276
1277     /* This is called whenever the sink volume changes for a reason that
1278      * is not a sink input volume change. We need to fix up the
1279 * sink input volumes accordingly */
1280
1281 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1282 pa_cvolume sink_volume, new_virtual_volume;
1283 unsigned c;
1284
1285 /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */
1286
1287 sink_volume = s->virtual_volume;
1288 pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);
1289
1290 for (c = 0; c < i->sample_spec.channels; c++)
1291 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1292 i->relative_volume[c] *
1293 pa_sw_volume_to_linear(sink_volume.values[c]));
1294
1295 new_virtual_volume.channels = i->sample_spec.channels;
1296
1297 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1298 i->virtual_volume = new_virtual_volume;
1299
1300 /* Hmm, the soft volume might no longer actually match
1301 * what has been chosen as new virtual volume here,
1302 * especially when the old volume was
1303 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1304 * volumes here. */
1305 compute_new_soft_volume(i, &sink_volume);
1306
1307 /* The virtual volume changed, let's tell people so */
1308 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1309 }
1310 }
1311
1312 /* If the soft_volume of any of the sink inputs got changed, let's
1313 * make sure the thread copies are synced up. */
1314 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
1315 }
1316
1317 /* Called from main thread */
1318 void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference, pa_bool_t save) {
1319 pa_bool_t virtual_volume_changed;
1320
1321 pa_sink_assert_ref(s);
1322 pa_assert(PA_SINK_IS_LINKED(s->state));
1323 pa_assert(volume);
1324 pa_assert(pa_cvolume_valid(volume));
1325 pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));
1326
1327 virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
1328 s->virtual_volume = *volume;
1329 s->save_volume = (!virtual_volume_changed && s->save_volume) || save;
1330
1331 if (become_reference)
1332 s->reference_volume = s->virtual_volume;
1333
1334 /* Propagate this volume change back to the inputs */
1335 if (virtual_volume_changed)
1336 if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
1337 pa_sink_propagate_flat_volume(s);
1338
1339 if (s->set_volume) {
1340 /* If we have a function set_volume(), then we do not apply a
1341 * soft volume by default. However, set_volume() is free to
1342 * apply one to s->soft_volume */
1343
1344 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1345 s->set_volume(s);
1346
1347 } else
1348 /* If we have no function set_volume(), then the soft volume
1349 * becomes the virtual volume */
1350 s->soft_volume = s->virtual_volume;
1351
1352 /* This tells the sink that soft and/or virtual volume changed */
1353 if (sendmsg)
1354 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1355
1356 if (virtual_volume_changed)
1357 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1358 }
1359
1360 /* Called from main thread. Only to be called by sink implementor */
1361 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1362 pa_sink_assert_ref(s);
1363 pa_assert(volume);
1364
1365 s->soft_volume = *volume;
1366
1367 if (PA_SINK_IS_LINKED(s->state))
1368 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1369 else
1370 s->thread_info.soft_volume = *volume;
1371 }
1372
1373 /* Called from main thread */
1374 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
1375 pa_sink_assert_ref(s);
1376
1377 if (s->refresh_volume || force_refresh) {
1378 struct pa_cvolume old_virtual_volume = s->virtual_volume;
1379
1380 if (s->get_volume)
1381 s->get_volume(s);
1382
1383 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1384
1385 if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {
1386
1387 s->reference_volume = s->virtual_volume;
1388
1389 if (s->flags & PA_SINK_FLAT_VOLUME)
1390 pa_sink_propagate_flat_volume(s);
1391
1392 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1393 }
1394 }
1395
1396 return reference ? &s->reference_volume : &s->virtual_volume;
1397 }
1398
1399 /* Called from main thread */
1400 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume, pa_bool_t save) {
1401 pa_sink_assert_ref(s);
1402
1403 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1404 if (pa_cvolume_equal(&s->virtual_volume, new_volume)) {
1405 s->save_volume = s->save_volume || save;
1406 return;
1407 }
1408
1409 s->reference_volume = s->virtual_volume = *new_volume;
1410 s->save_volume = save;
1411
1412 if (s->flags & PA_SINK_FLAT_VOLUME)
1413 pa_sink_propagate_flat_volume(s);
1414
1415 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1416 }
1417
1418 /* Called from main thread */
1419 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1420 pa_bool_t old_muted;
1421
1422 pa_sink_assert_ref(s);
1423 pa_assert(PA_SINK_IS_LINKED(s->state));
1424
1425 old_muted = s->muted;
1426 s->muted = mute;
1427 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1428
1429 if (s->set_mute)
1430 s->set_mute(s);
1431
1432 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1433
1434 if (old_muted != s->muted)
1435 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1436 }
1437
1438 /* Called from main thread */
1439 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1440
1441 pa_sink_assert_ref(s);
1442
1443 if (s->refresh_muted || force_refresh) {
1444 pa_bool_t old_muted = s->muted;
1445
1446 if (s->get_mute)
1447 s->get_mute(s);
1448
1449 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1450
1451 if (old_muted != s->muted) {
1452 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1453
1454 /* Make sure the soft mute status stays in sync */
1455 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1456 }
1457 }
1458
1459 return s->muted;
1460 }
1461
1462 /* Called from main thread */
1463 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted, pa_bool_t save) {
1464 pa_sink_assert_ref(s);
1465
1466     /* The sink implementor may call this if the mute status changed to make sure everyone is notified */
1467
1468 if (s->muted == new_muted) {
1469 s->save_muted = s->save_muted || save;
1470 return;
1471 }
1472
1473 s->muted = new_muted;
1474 s->save_muted = save;
1475
1476 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1477 }
1478
1479 /* Called from main thread */
1480 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1481 pa_sink_assert_ref(s);
1482
1483 if (p)
1484 pa_proplist_update(s->proplist, mode, p);
1485
1486 if (PA_SINK_IS_LINKED(s->state)) {
1487 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1488 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1489 }
1490
1491 return TRUE;
1492 }
1493
1494 /* Called from main thread */
1495 void pa_sink_set_description(pa_sink *s, const char *description) {
1496 const char *old;
1497 pa_sink_assert_ref(s);
1498
1499 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1500 return;
1501
1502 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1503
1504 if (old && description && !strcmp(old, description))
1505 return;
1506
1507 if (description)
1508 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1509 else
1510 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1511
1512 if (s->monitor_source) {
1513 char *n;
1514
1515 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1516 pa_source_set_description(s->monitor_source, n);
1517 pa_xfree(n);
1518 }
1519
1520 if (PA_SINK_IS_LINKED(s->state)) {
1521 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1522 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1523 }
1524 }
1525
1526 /* Called from main thread */
1527 unsigned pa_sink_linked_by(pa_sink *s) {
1528 unsigned ret;
1529
1530 pa_sink_assert_ref(s);
1531 pa_assert(PA_SINK_IS_LINKED(s->state));
1532
1533 ret = pa_idxset_size(s->inputs);
1534
1535 /* We add in the number of streams connected to us here. Please
1536      * note the asymmetry to pa_sink_used_by()! */
1537
1538 if (s->monitor_source)
1539 ret += pa_source_linked_by(s->monitor_source);
1540
1541 return ret;
1542 }
1543
1544 /* Called from main thread */
1545 unsigned pa_sink_used_by(pa_sink *s) {
1546 unsigned ret;
1547
1548 pa_sink_assert_ref(s);
1549 pa_assert(PA_SINK_IS_LINKED(s->state));
1550
1551 ret = pa_idxset_size(s->inputs);
1552 pa_assert(ret >= s->n_corked);
1553
1554 /* Streams connected to our monitor source do not matter for
1555      * pa_sink_used_by()! */
1556
1557 return ret - s->n_corked;
1558 }
1559
1560 /* Called from main thread */
1561 unsigned pa_sink_check_suspend(pa_sink *s) {
1562 unsigned ret;
1563 pa_sink_input *i;
1564 uint32_t idx;
1565
1566 pa_sink_assert_ref(s);
1567
1568 if (!PA_SINK_IS_LINKED(s->state))
1569 return 0;
1570
1571 ret = 0;
1572
1573 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1574 pa_sink_input_state_t st;
1575
1576 st = pa_sink_input_get_state(i);
1577 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1578
1579 if (st == PA_SINK_INPUT_CORKED)
1580 continue;
1581
1582 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1583 continue;
1584
1585 ret ++;
1586 }
1587
1588 if (s->monitor_source)
1589 ret += pa_source_check_suspend(s->monitor_source);
1590
1591 return ret;
1592 }
1593
1594 /* Called from the IO thread */
1595 static void sync_input_volumes_within_thread(pa_sink *s) {
1596 pa_sink_input *i;
1597 void *state = NULL;
1598
1599 pa_sink_assert_ref(s);
1600
1601 while ((i = PA_SINK_INPUT(pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))) {
1602 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1603 continue;
1604
1605 i->thread_info.soft_volume = i->soft_volume;
1606 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1607 }
1608 }
1609
1610 /* Called from IO thread, except when it is not */
1611 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1612 pa_sink *s = PA_SINK(o);
1613 pa_sink_assert_ref(s);
1614
1615 switch ((pa_sink_message_t) code) {
1616
1617 case PA_SINK_MESSAGE_ADD_INPUT: {
1618 pa_sink_input *i = PA_SINK_INPUT(userdata);
1619
1620 /* If you change anything here, make sure to change the
1621 * sink input handling a few lines down at
1622 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1623
1624 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1625
1626 /* Since the caller sleeps in pa_sink_input_put(), we can
1627 * safely access data outside of thread_info even though
1628 * it is mutable */
1629
1630 if ((i->thread_info.sync_prev = i->sync_prev)) {
1631 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1632 pa_assert(i->sync_prev->sync_next == i);
1633 i->thread_info.sync_prev->thread_info.sync_next = i;
1634 }
1635
1636 if ((i->thread_info.sync_next = i->sync_next)) {
1637 pa_assert(i->sink == i->thread_info.sync_next->sink);
1638 pa_assert(i->sync_next->sync_prev == i);
1639 i->thread_info.sync_next->thread_info.sync_prev = i;
1640 }
1641
1642 pa_assert(!i->thread_info.attached);
1643 i->thread_info.attached = TRUE;
1644
1645 if (i->attach)
1646 i->attach(i);
1647
1648 pa_sink_input_set_state_within_thread(i, i->state);
1649
1650 /* The requested latency of the sink input needs to be
1651 * fixed up and then configured on the sink */
1652
1653 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1654 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1655
1656 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1657 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1658
1659 /* We don't rewind here automatically. This is left to the
1660 * sink input implementor because some sink inputs need a
1661 * slow start, i.e. need some time to buffer client
1662 * samples before beginning streaming. */
1663
1664 /* In flat volume mode we need to update the volume as
1665 * well */
1666 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1667 }
1668
1669 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1670 pa_sink_input *i = PA_SINK_INPUT(userdata);
1671
1672 /* If you change anything here, make sure to change the
1673 * sink input handling a few lines down at
1674              * PA_SINK_MESSAGE_START_MOVE, too. */
1675
1676 if (i->detach)
1677 i->detach(i);
1678
1679 pa_sink_input_set_state_within_thread(i, i->state);
1680
1681 pa_assert(i->thread_info.attached);
1682 i->thread_info.attached = FALSE;
1683
1684 /* Since the caller sleeps in pa_sink_input_unlink(),
1685 * we can safely access data outside of thread_info even
1686 * though it is mutable */
1687
1688 pa_assert(!i->sync_prev);
1689 pa_assert(!i->sync_next);
1690
1691 if (i->thread_info.sync_prev) {
1692 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1693 i->thread_info.sync_prev = NULL;
1694 }
1695
1696 if (i->thread_info.sync_next) {
1697 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1698 i->thread_info.sync_next = NULL;
1699 }
1700
1701 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1702 pa_sink_input_unref(i);
1703
1704 pa_sink_invalidate_requested_latency(s);
1705 pa_sink_request_rewind(s, (size_t) -1);
1706
1707 /* In flat volume mode we need to update the volume as
1708 * well */
1709 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1710 }
1711
1712 case PA_SINK_MESSAGE_START_MOVE: {
1713 pa_sink_input *i = PA_SINK_INPUT(userdata);
1714
1715 /* We don't support moving synchronized streams. */
1716 pa_assert(!i->sync_prev);
1717 pa_assert(!i->sync_next);
1718 pa_assert(!i->thread_info.sync_next);
1719 pa_assert(!i->thread_info.sync_prev);
1720
1721 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1722 pa_usec_t usec = 0;
1723 size_t sink_nbytes, total_nbytes;
1724
1725 /* Get the latency of the sink */
1726 if (!(s->flags & PA_SINK_LATENCY) ||
1727 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1728 usec = 0;
1729
1730 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1731 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1732
1733 if (total_nbytes > 0) {
1734 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1735 i->thread_info.rewrite_flush = TRUE;
1736 pa_sink_input_process_rewind(i, sink_nbytes);
1737 }
1738 }
1739
1740 if (i->detach)
1741 i->detach(i);
1742
1743 pa_assert(i->thread_info.attached);
1744 i->thread_info.attached = FALSE;
1745
1746 /* Let's remove the sink input ...*/
1747 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1748 pa_sink_input_unref(i);
1749
1750 pa_sink_invalidate_requested_latency(s);
1751
1752 pa_log_debug("Requesting rewind due to started move");
1753 pa_sink_request_rewind(s, (size_t) -1);
1754
1755 /* In flat volume mode we need to update the volume as
1756 * well */
1757 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1758 }
1759
1760 case PA_SINK_MESSAGE_FINISH_MOVE: {
1761 pa_sink_input *i = PA_SINK_INPUT(userdata);
1762
1763 /* We don't support moving synchronized streams. */
1764 pa_assert(!i->sync_prev);
1765 pa_assert(!i->sync_next);
1766 pa_assert(!i->thread_info.sync_next);
1767 pa_assert(!i->thread_info.sync_prev);
1768
1769 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1770
1771 pa_assert(!i->thread_info.attached);
1772 i->thread_info.attached = TRUE;
1773
1774 if (i->attach)
1775 i->attach(i);
1776
1777 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1778 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1779
1780 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1781 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1782
1783 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1784 pa_usec_t usec = 0;
1785 size_t nbytes;
1786
1787 /* Get the latency of the sink */
1788 if (!(s->flags & PA_SINK_LATENCY) ||
1789 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1790 usec = 0;
1791
1792 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1793
1794 if (nbytes > 0)
1795 pa_sink_input_drop(i, nbytes);
1796
1797 pa_log_debug("Requesting rewind due to finished move");
1798 pa_sink_request_rewind(s, nbytes);
1799 }
1800
1801 /* In flat volume mode we need to update the volume as
1802 * well */
1803 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1804 }
1805
1806 case PA_SINK_MESSAGE_SET_VOLUME:
1807
1808 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1809 s->thread_info.soft_volume = s->soft_volume;
1810 pa_sink_request_rewind(s, (size_t) -1);
1811 }
1812
1813 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1814 return 0;
1815
1816 /* Fall through ... */
1817
1818 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1819 sync_input_volumes_within_thread(s);
1820 return 0;
1821
1822 case PA_SINK_MESSAGE_GET_VOLUME:
1823 return 0;
1824
1825 case PA_SINK_MESSAGE_SET_MUTE:
1826
1827 if (s->thread_info.soft_muted != s->muted) {
1828 s->thread_info.soft_muted = s->muted;
1829 pa_sink_request_rewind(s, (size_t) -1);
1830 }
1831
1832 return 0;
1833
1834 case PA_SINK_MESSAGE_GET_MUTE:
1835 return 0;
1836
1837 case PA_SINK_MESSAGE_SET_STATE: {
1838
1839 pa_bool_t suspend_change =
1840 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1841 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1842
1843 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1844
1845 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1846 s->thread_info.rewind_nbytes = 0;
1847 s->thread_info.rewind_requested = FALSE;
1848 }
1849
1850 if (suspend_change) {
1851 pa_sink_input *i;
1852 void *state = NULL;
1853
1854 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1855 if (i->suspend_within_thread)
1856 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1857 }
1858
1859 return 0;
1860 }
1861
1862 case PA_SINK_MESSAGE_DETACH:
1863
1864 /* Detach all streams */
1865 pa_sink_detach_within_thread(s);
1866 return 0;
1867
1868 case PA_SINK_MESSAGE_ATTACH:
1869
1870 /* Reattach all streams */
1871 pa_sink_attach_within_thread(s);
1872 return 0;
1873
1874 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
1875
1876 pa_usec_t *usec = userdata;
1877 *usec = pa_sink_get_requested_latency_within_thread(s);
1878
1879 if (*usec == (pa_usec_t) -1)
1880 *usec = s->thread_info.max_latency;
1881
1882 return 0;
1883 }
1884
1885 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
1886 pa_usec_t *r = userdata;
1887
1888 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
1889
1890 return 0;
1891 }
1892
1893 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
1894 pa_usec_t *r = userdata;
1895
1896 r[0] = s->thread_info.min_latency;
1897 r[1] = s->thread_info.max_latency;
1898
1899 return 0;
1900 }
1901
1902 case PA_SINK_MESSAGE_GET_MAX_REWIND:
1903
1904 *((size_t*) userdata) = s->thread_info.max_rewind;
1905 return 0;
1906
1907 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
1908
1909 *((size_t*) userdata) = s->thread_info.max_request;
1910 return 0;
1911
1912 case PA_SINK_MESSAGE_SET_MAX_REWIND:
1913
1914 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
1915 return 0;
1916
1917 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
1918
1919 pa_sink_set_max_request_within_thread(s, (size_t) offset);
1920 return 0;
1921
1922 case PA_SINK_MESSAGE_GET_LATENCY:
1923 case PA_SINK_MESSAGE_MAX:
1924 ;
1925 }
1926
1927 return -1;
1928 }
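/*
 * Note (illustrative sketch, not upstream documentation): a sink
 * implementation normally installs its own process_msg handler and
 * delegates every message it does not handle itself to the generic
 * handler above. A minimal illustration, assuming a hypothetical
 * "struct userdata" owned by the module with a "backend_latency" field:
 *
 *     static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
 *         struct userdata *u = PA_SINK(o)->userdata;
 *
 *         switch (code) {
 *             case PA_SINK_MESSAGE_GET_LATENCY:
 *                 // Report however much audio the backend still has queued up
 *                 *((pa_usec_t*) data) = u->backend_latency;
 *                 return 0;
 *         }
 *
 *         // Everything else: fall back to the generic handler
 *         return pa_sink_process_msg(o, code, data, offset, chunk);
 *     }
 *
 * The names sink_process_msg, struct userdata and backend_latency are
 * illustrative only.
 */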
1929
1930 /* Called from main thread */
1931 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
1932 pa_sink *sink;
1933 uint32_t idx;
1934 int ret = 0;
1935
1936 pa_core_assert_ref(c);
1937 pa_assert(cause != 0);
1938
1939 for (sink = PA_SINK(pa_idxset_first(c->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(c->sinks, &idx))) {
1940 int r;
1941
1942 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
1943 ret = r;
1944 }
1945
1946 return ret;
1947 }
1948
1949 /* Called from main thread */
1950 void pa_sink_detach(pa_sink *s) {
1951 pa_sink_assert_ref(s);
1952 pa_assert(PA_SINK_IS_LINKED(s->state));
1953
1954 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
1955 }
1956
1957 /* Called from main thread */
1958 void pa_sink_attach(pa_sink *s) {
1959 pa_sink_assert_ref(s);
1960 pa_assert(PA_SINK_IS_LINKED(s->state));
1961
1962 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
1963 }
1964
1965 /* Called from IO thread */
1966 void pa_sink_detach_within_thread(pa_sink *s) {
1967 pa_sink_input *i;
1968 void *state = NULL;
1969
1970 pa_sink_assert_ref(s);
1971 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1972
1973 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1974 if (i->detach)
1975 i->detach(i);
1976
1977 if (s->monitor_source)
1978 pa_source_detach_within_thread(s->monitor_source);
1979 }
1980
1981 /* Called from IO thread */
1982 void pa_sink_attach_within_thread(pa_sink *s) {
1983 pa_sink_input *i;
1984 void *state = NULL;
1985
1986 pa_sink_assert_ref(s);
1987 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1988
1989 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1990 if (i->attach)
1991 i->attach(i);
1992
1993 if (s->monitor_source)
1994 pa_source_attach_within_thread(s->monitor_source);
1995 }
1996
1997 /* Called from IO thread */
1998 void pa_sink_request_rewind(pa_sink *s, size_t nbytes) {
1999 pa_sink_assert_ref(s);
2000 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2001
2002 if (s->thread_info.state == PA_SINK_SUSPENDED)
2003 return;
2004
2005 if (nbytes == (size_t) -1)
2006 nbytes = s->thread_info.max_rewind;
2007
2008 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2009
2010 if (s->thread_info.rewind_requested &&
2011 nbytes <= s->thread_info.rewind_nbytes)
2012 return;
2013
2014 s->thread_info.rewind_nbytes = nbytes;
2015 s->thread_info.rewind_requested = TRUE;
2016
2017 if (s->request_rewind)
2018 s->request_rewind(s);
2019 }
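/*
 * Note: rewind requests are coalesced -- only the largest outstanding
 * request matters -- clamped to max_rewind, and ignored entirely while
 * the sink is suspended. Passing (size_t) -1 asks for the biggest rewind
 * currently possible; that is what the volume/mute handlers above do when
 * already-rendered audio becomes invalid, e.g. (illustrative call from
 * the IO thread):
 *
 *     pa_sink_request_rewind(s, (size_t) -1);
 */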
2020
2021 /* Called from IO thread */
2022 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2023 pa_usec_t result = (pa_usec_t) -1;
2024 pa_sink_input *i;
2025 void *state = NULL;
2026 pa_usec_t monitor_latency;
2027
2028 pa_sink_assert_ref(s);
2029
2030 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2031 return PA_CLAMP(s->fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2032
2033 if (s->thread_info.requested_latency_valid)
2034 return s->thread_info.requested_latency;
2035
2036 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2037
2038 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2039 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2040 result = i->thread_info.requested_sink_latency;
2041
2042 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2043
2044 if (monitor_latency != (pa_usec_t) -1 &&
2045 (result == (pa_usec_t) -1 || result > monitor_latency))
2046 result = monitor_latency;
2047
2048 if (result != (pa_usec_t) -1)
2049 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2050
2051 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2052 /* Only cache if properly initialized */
2053 s->thread_info.requested_latency = result;
2054 s->thread_info.requested_latency_valid = TRUE;
2055 }
2056
2057 return result;
2058 }
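/*
 * Note: the requested latency is the smallest latency asked for by any
 * connected sink input or by the monitor source, clamped to
 * [min_latency, max_latency]. For sinks without PA_SINK_DYNAMIC_LATENCY
 * the configured fixed latency is returned instead. The computed value is
 * cached until pa_sink_invalidate_requested_latency() drops it.
 */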
2059
2060 /* Called from main thread */
2061 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2062 pa_usec_t usec = 0;
2063
2064 pa_sink_assert_ref(s);
2065 pa_assert(PA_SINK_IS_LINKED(s->state));
2066
2067 if (s->state == PA_SINK_SUSPENDED)
2068 return 0;
2069
2070 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2071 return usec;
2072 }
2073
2074 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2075 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2076 pa_sink_input *i;
2077 void *state = NULL;
2078
2079 pa_sink_assert_ref(s);
2080
2081 if (max_rewind == s->thread_info.max_rewind)
2082 return;
2083
2084 s->thread_info.max_rewind = max_rewind;
2085
2086 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2087 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2088 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2089 }
2090
2091 if (s->monitor_source)
2092 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2093 }
2094
2095 /* Called from main thread */
2096 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2097 pa_sink_assert_ref(s);
2098
2099 if (PA_SINK_IS_LINKED(s->state))
2100 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2101 else
2102 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2103 }
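/*
 * Note: the *_within_thread variant may only be used from the IO thread
 * (or from the main thread before the IO thread has started), while
 * pa_sink_set_max_rewind() marshals the new value over the asyncmsgq once
 * the sink is linked. The same pattern applies to max_request below.
 */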
2104
2105 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2106 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2107 void *state = NULL;
2108
2109 pa_sink_assert_ref(s);
2110
2111 if (max_request == s->thread_info.max_request)
2112 return;
2113
2114 s->thread_info.max_request = max_request;
2115
2116 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2117 pa_sink_input *i;
2118
2119 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2120 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2121 }
2122 }
2123
2124 /* Called from main thread */
2125 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2126 pa_sink_assert_ref(s);
2127
2128 if (PA_SINK_IS_LINKED(s->state))
2129 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2130 else
2131 pa_sink_set_max_request_within_thread(s, max_request);
2132 }
2133
2134 /* Called from IO thread */
2135 void pa_sink_invalidate_requested_latency(pa_sink *s) {
2136 pa_sink_input *i;
2137 void *state = NULL;
2138
2139 pa_sink_assert_ref(s);
2140
2141 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2142 return;
2143
2144 s->thread_info.requested_latency_valid = FALSE;
2145
2146 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2147
2148 if (s->update_requested_latency)
2149 s->update_requested_latency(s);
2150
2151 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2152 if (i->update_sink_requested_latency)
2153 i->update_sink_requested_latency(i);
2154 }
2155 }
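/*
 * Note: call this from the IO thread whenever something that feeds into
 * the requested latency changes (an input's requested latency, the
 * latency range, ...). The cached value is dropped, the next
 * pa_sink_get_requested_latency_within_thread() call recomputes it, and
 * the sink and its inputs are notified via their
 * update_requested_latency()/update_sink_requested_latency() hooks.
 */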
2156
2157 /* Called from main thread */
2158 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2159 pa_sink_assert_ref(s);
2160
2161     /* min_latency == 0: no lower limit requested; any other value:
2162      * use that value as the limit. The same applies to max_latency.
2163      * Either way the values are clamped to the compiled-in absolute
2164      * bounds below. */
2165
2166 if (min_latency < ABSOLUTE_MIN_LATENCY)
2167 min_latency = ABSOLUTE_MIN_LATENCY;
2168
2169 if (max_latency <= 0 ||
2170 max_latency > ABSOLUTE_MAX_LATENCY)
2171 max_latency = ABSOLUTE_MAX_LATENCY;
2172
2173 pa_assert(min_latency <= max_latency);
2174
2175 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2176 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2177 max_latency == ABSOLUTE_MAX_LATENCY) ||
2178 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2179
2180 if (PA_SINK_IS_LINKED(s->state)) {
2181 pa_usec_t r[2];
2182
2183 r[0] = min_latency;
2184 r[1] = max_latency;
2185
2186 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2187 } else
2188 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2189 }
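/*
 * Illustrative sketch: a driver that supports PA_SINK_DYNAMIC_LATENCY
 * would typically advertise its usable range once, around the time the
 * sink is put, e.g. (figures purely illustrative, u->sink standing for
 * the module's sink pointer):
 *
 *     pa_sink_set_latency_range(u->sink, 5*PA_USEC_PER_MSEC, 2*PA_USEC_PER_SEC);
 *
 * Passing 0 for either bound means "no limit"; the value is then clamped
 * to the absolute minimum/maximum defined at the top of this file.
 */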
2190
2191 /* Called from main thread */
2192 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2193 pa_sink_assert_ref(s);
2194 pa_assert(min_latency);
2195 pa_assert(max_latency);
2196
2197 if (PA_SINK_IS_LINKED(s->state)) {
2198 pa_usec_t r[2] = { 0, 0 };
2199
2200 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2201
2202 *min_latency = r[0];
2203 *max_latency = r[1];
2204 } else {
2205 *min_latency = s->thread_info.min_latency;
2206 *max_latency = s->thread_info.max_latency;
2207 }
2208 }
2209
2210 /* Called from IO thread */
2211 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2212 void *state = NULL;
2213
2214 pa_sink_assert_ref(s);
2215
2216 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2217 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2218 pa_assert(min_latency <= max_latency);
2219
2220 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2221 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2222 max_latency == ABSOLUTE_MAX_LATENCY) ||
2223 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2224
2225 s->thread_info.min_latency = min_latency;
2226 s->thread_info.max_latency = max_latency;
2227
2228 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2229 pa_sink_input *i;
2230
2231 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2232 if (i->update_sink_latency_range)
2233 i->update_sink_latency_range(i);
2234 }
2235
2236 pa_sink_invalidate_requested_latency(s);
2237
2238 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2239 }
2240
2241 /* Called from main thread, before the sink is put */
2242 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2243 pa_sink_assert_ref(s);
2244
2245 pa_assert(pa_sink_get_state(s) == PA_SINK_INIT);
2246
2247 if (latency < ABSOLUTE_MIN_LATENCY)
2248 latency = ABSOLUTE_MIN_LATENCY;
2249
2250 if (latency > ABSOLUTE_MAX_LATENCY)
2251 latency = ABSOLUTE_MAX_LATENCY;
2252
2253 s->fixed_latency = latency;
2254 pa_source_set_fixed_latency(s->monitor_source, latency);
2255 }
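/*
 * Illustrative sketch: sinks that do not set PA_SINK_DYNAMIC_LATENCY
 * report a single fixed latency instead of a range. A module would set it
 * while the sink is still in PA_SINK_INIT, i.e. before pa_sink_put(),
 * e.g. (value illustrative):
 *
 *     pa_sink_set_fixed_latency(u->sink, 250*PA_USEC_PER_MSEC);
 */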
2256
2257 /* Called from main context */
2258 size_t pa_sink_get_max_rewind(pa_sink *s) {
2259 size_t r;
2260 pa_sink_assert_ref(s);
2261
2262 if (!PA_SINK_IS_LINKED(s->state))
2263 return s->thread_info.max_rewind;
2264
2265 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2266
2267 return r;
2268 }
2269
2270 /* Called from main context */
2271 size_t pa_sink_get_max_request(pa_sink *s) {
2272 size_t r;
2273 pa_sink_assert_ref(s);
2274
2275 if (!PA_SINK_IS_LINKED(s->state))
2276 return s->thread_info.max_request;
2277
2278 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2279
2280 return r;
2281 }
2282
2283 /* Called from main context */
2284 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2285 pa_device_port *port;
2286
2287 pa_assert(s);
2288
2289 if (!s->set_port) {
2290 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2291 return -PA_ERR_NOTIMPLEMENTED;
2292 }
2293
2294 if (!s->ports)
2295 return -PA_ERR_NOENTITY;
2296
2297 if (!(port = pa_hashmap_get(s->ports, name)))
2298 return -PA_ERR_NOENTITY;
2299
2300 if (s->active_port == port) {
2301 s->save_port = s->save_port || save;
2302 return 0;
2303 }
2304
2305 if ((s->set_port(s, port)) < 0)
2306 return -PA_ERR_NOENTITY;
2307
2308 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2309
2310 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2311
2312 s->active_port = port;
2313 s->save_port = save;
2314
2315 return 0;
2316 }
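/*
 * Note on the return contract: -PA_ERR_NOTIMPLEMENTED if the sink
 * provides no set_port() callback; -PA_ERR_NOENTITY if the sink has no
 * ports, the named port does not exist, or the backend refuses the
 * switch; 0 on success. When the port actually changes a SINK/CHANGE
 * subscription event is posted, while re-selecting the already active
 * port merely updates the save flag.
 */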
2317
2318 /* Called from main context */
2319 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2320 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2321
2322 pa_assert(p);
2323
2324 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2325 return TRUE;
2326
2327 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2328
2329 if (pa_streq(ff, "microphone"))
2330 t = "audio-input-microphone";
2331 else if (pa_streq(ff, "webcam"))
2332 t = "camera-web";
2333 else if (pa_streq(ff, "computer"))
2334 t = "computer";
2335 else if (pa_streq(ff, "handset"))
2336 t = "phone";
2337 else if (pa_streq(ff, "portable"))
2338 t = "multimedia-player";
2339 else if (pa_streq(ff, "tv"))
2340 t = "video-display";
2341
2342         /*
2343          * The following icons are not part of the icon naming spec;
2344          * see the discussion with the spec maintainer at:
2345          *
2346          * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2347          */
2348 else if (pa_streq(ff, "headset"))
2349 t = "audio-headset";
2350 else if (pa_streq(ff, "headphone"))
2351 t = "audio-headphones";
2352 else if (pa_streq(ff, "speaker"))
2353 t = "audio-speakers";
2354 else if (pa_streq(ff, "hands-free"))
2355 t = "audio-handsfree";
2356 }
2357
2358 if (!t)
2359 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2360 if (pa_streq(c, "modem"))
2361 t = "modem";
2362
2363 if (!t) {
2364 if (is_sink)
2365 t = "audio-card";
2366 else
2367 t = "audio-input-microphone";
2368 }
2369
2370 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2371 if (strstr(profile, "analog"))
2372 s = "-analog";
2373 else if (strstr(profile, "iec958"))
2374 s = "-iec958";
2375 else if (strstr(profile, "hdmi"))
2376 s = "-hdmi";
2377 }
2378
2379 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2380
2381 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2382
2383 return TRUE;
2384 }
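/*
 * Worked example: a sink carrying form factor "headphone", a profile name
 * containing "analog" and bus "usb" ends up with
 * PA_PROP_DEVICE_ICON_NAME set to "audio-headphones-analog-usb"; without
 * a bus property the trailing "-usb" part is simply omitted.
 */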
2385
2386 pa_bool_t pa_device_init_description(pa_proplist *p) {
2387 const char *s;
2388 pa_assert(p);
2389
2390 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2391 return TRUE;
2392
2393 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2394 if (pa_streq(s, "internal")) {
2395 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Internal Audio"));
2396 return TRUE;
2397 }
2398
2399 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2400 if (pa_streq(s, "modem")) {
2401 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, _("Modem"));
2402 return TRUE;
2403 }
2404
2405 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME))) {
2406 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, s);
2407 return TRUE;
2408 }
2409
2410 return FALSE;
2411 }
2412
2413 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2414 const char *s;
2415 pa_assert(p);
2416
2417 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2418 return TRUE;
2419
2420 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2421 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2422 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2423 return TRUE;
2424 }
2425
2426 return FALSE;
2427 }
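/*
 * Note: tagging handsets and hands-free devices with the intended role
 * "phone" lets role-based routing (e.g. module-intended-roles, if loaded)
 * prefer these devices for telephony streams.
 */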