pulseaudio / src / pulsecore / sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
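/* MAX_MIX_CHANNELS caps how many sink inputs are mixed in a single render
 * pass, MIX_BUFFER_LENGTH is the default render size used when no explicit
 * length is requested, and the latency constants are pa_usec_t values:
 * 500 us minimum, 10 s maximum, and a 250 ms default for sinks without
 * PA_SINK_DYNAMIC_LATENCY. */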
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
54
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 memset(data, 0, sizeof(*data));
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
149
150 /* Called from main context */
151 static void reset_callbacks(pa_sink *s) {
152 pa_assert(s);
153
154 s->set_state = NULL;
155 s->get_volume = NULL;
156 s->set_volume = NULL;
157 s->get_mute = NULL;
158 s->set_mute = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
161 s->set_port = NULL;
162 }
163
164 /* Called from main context */
165 pa_sink* pa_sink_new(
166 pa_core *core,
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
169
170 pa_sink *s;
171 const char *name;
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
174 const char *dn;
175 char *pt;
176
177 pa_assert(core);
178 pa_assert(data);
179 pa_assert(data->name);
180
181 s = pa_msgobject_new(pa_sink);
182
183 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
184 pa_log_debug("Failed to register name %s.", data->name);
185 pa_xfree(s);
186 return NULL;
187 }
188
189 pa_sink_new_data_set_name(data, name);
190
191 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
192 pa_xfree(s);
193 pa_namereg_unregister(core, name);
194 return NULL;
195 }
196
197 /* FIXME, need to free s here on failure */
198
199 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
200 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
201
202 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
203
204 if (!data->channel_map_is_set)
205 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
206
207 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
208 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
209
210 if (!data->volume_is_set)
211 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
212
213 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
214 pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);
215
216 if (!data->muted_is_set)
217 data->muted = FALSE;
218
219 if (data->card)
220 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
221
222 pa_device_init_description(data->proplist);
223 pa_device_init_icon(data->proplist, TRUE);
224 pa_device_init_intended_roles(data->proplist);
225
226 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
227 pa_xfree(s);
228 pa_namereg_unregister(core, name);
229 return NULL;
230 }
231
232 s->parent.parent.free = sink_free;
233 s->parent.process_msg = pa_sink_process_msg;
234
235 s->core = core;
236 s->state = PA_SINK_INIT;
237 s->flags = flags;
238 s->suspend_cause = 0;
239 s->name = pa_xstrdup(name);
240 s->proplist = pa_proplist_copy(data->proplist);
241 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
242 s->module = data->module;
243 s->card = data->card;
244
245 s->sample_spec = data->sample_spec;
246 s->channel_map = data->channel_map;
247
248 s->inputs = pa_idxset_new(NULL, NULL);
249 s->n_corked = 0;
250
251 s->reference_volume = s->virtual_volume = data->volume;
252 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
253 s->base_volume = PA_VOLUME_NORM;
254 s->n_volume_steps = PA_VOLUME_NORM+1;
255 s->muted = data->muted;
256 s->refresh_volume = s->refresh_muted = FALSE;
257
258 s->fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
259
260 reset_callbacks(s);
261 s->userdata = NULL;
262
263 s->asyncmsgq = NULL;
264 s->rtpoll = NULL;
265
266 /* As a minor optimization we just steal the list instead of
267 * copying it here */
268 s->ports = data->ports;
269 data->ports = NULL;
270
271 s->active_port = NULL;
272 s->save_port = FALSE;
273
274 if (data->active_port && s->ports)
275 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
276 s->save_port = data->save_port;
277
278 if (!s->active_port && s->ports) {
279 void *state;
280 pa_device_port *p;
281
282 PA_HASHMAP_FOREACH(p, s->ports, state)
283 if (!s->active_port || p->priority > s->active_port->priority)
284 s->active_port = p;
285 }
286
287 s->save_volume = data->save_volume;
288 s->save_muted = data->save_muted;
289
290 pa_silence_memchunk_get(
291 &core->silence_cache,
292 core->mempool,
293 &s->silence,
294 &s->sample_spec,
295 0);
296
297 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
298 s->thread_info.soft_volume = s->soft_volume;
299 s->thread_info.soft_muted = s->muted;
300 s->thread_info.state = s->state;
301 s->thread_info.rewind_nbytes = 0;
302 s->thread_info.rewind_requested = FALSE;
303 s->thread_info.max_rewind = 0;
304 s->thread_info.max_request = 0;
305 s->thread_info.requested_latency_valid = FALSE;
306 s->thread_info.requested_latency = 0;
307 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
308 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
309
310 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
311
312 if (s->card)
313 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
314
315 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
316 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
317 s->index,
318 s->name,
319 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
320 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
321 pt);
322 pa_xfree(pt);
323
324 pa_source_new_data_init(&source_data);
325 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
326 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
327 source_data.name = pa_sprintf_malloc("%s.monitor", name);
328 source_data.driver = data->driver;
329 source_data.module = data->module;
330 source_data.card = data->card;
331
332 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
333 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
334 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
335
336 s->monitor_source = pa_source_new(core, &source_data,
337 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
338 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
339
340 pa_source_new_data_done(&source_data);
341
342 if (!s->monitor_source) {
343 pa_sink_unlink(s);
344 pa_sink_unref(s);
345 return NULL;
346 }
347
348 s->monitor_source->monitor_of = s;
349
350 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
351 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
352
353 return s;
354 }
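/* A rough sketch of how a sink implementation typically drives the
 * _new()/_put() lifecycle (identifiers such as u, m, ss and map are
 * illustrative, not taken from this file):
 *
 *     pa_sink_new_data data;
 *
 *     pa_sink_new_data_init(&data);
 *     data.driver = __FILE__;
 *     data.module = m;
 *     pa_sink_new_data_set_name(&data, "my_sink");
 *     pa_sink_new_data_set_sample_spec(&data, &ss);
 *     pa_sink_new_data_set_channel_map(&data, &map);
 *
 *     u->sink = pa_sink_new(m->core, &data, PA_SINK_LATENCY);
 *     pa_sink_new_data_done(&data);
 *
 *     if (!u->sink)
 *         goto fail;
 *
 *     u->sink->userdata = u;
 *     pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
 *     pa_sink_set_rtpoll(u->sink, u->rtpoll);
 *
 *     pa_sink_put(u->sink);
 */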
355
356 /* Called from main context */
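/* Moves the sink to a new state. The implementor's set_state() callback runs
 * first here in the main thread; the new state is then pushed synchronously
 * to the IO thread via PA_SINK_MESSAGE_SET_STATE, and if the IO thread
 * rejects it, set_state() is called again with the original state to roll
 * back. On suspend/resume transitions all sink inputs are notified (or
 * killed, if they set PA_SINK_INPUT_FAIL_ON_SUSPEND) and the monitor source
 * is kept in sync. */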
357 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
358 int ret;
359 pa_bool_t suspend_change;
360 pa_sink_state_t original_state;
361
362 pa_assert(s);
363
364 if (s->state == state)
365 return 0;
366
367 original_state = s->state;
368
369 suspend_change =
370 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
371 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
372
373 if (s->set_state)
374 if ((ret = s->set_state(s, state)) < 0)
375 return ret;
376
377 if (s->asyncmsgq)
378 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
379
380 if (s->set_state)
381 s->set_state(s, original_state);
382
383 return ret;
384 }
385
386 s->state = state;
387
388 if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
389 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
390 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
391 }
392
393 if (suspend_change) {
394 pa_sink_input *i;
395 uint32_t idx;
396
397 /* We're suspending or resuming, tell everyone about it */
398
399 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx)))
400 if (s->state == PA_SINK_SUSPENDED &&
401 (i->flags & PA_SINK_INPUT_FAIL_ON_SUSPEND))
402 pa_sink_input_kill(i);
403 else if (i->suspend)
404 i->suspend(i, state == PA_SINK_SUSPENDED);
405
406 if (s->monitor_source)
407 pa_source_sync_suspend(s->monitor_source);
408 }
409
410 return 0;
411 }
412
413 /* Called from main context */
414 void pa_sink_put(pa_sink* s) {
415 pa_sink_assert_ref(s);
416
417 pa_assert(s->state == PA_SINK_INIT);
418
419 /* The following fields must be initialized properly when calling _put() */
420 pa_assert(s->asyncmsgq);
421 pa_assert(s->rtpoll);
422 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
423
424 /* Generally, flags should be initialized via pa_sink_new(). As a
425 * special exception we allow volume related flags to be set
426 * between _new() and _put(). */
427
428 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
429 s->flags |= PA_SINK_DECIBEL_VOLUME;
430
431 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
432 s->flags |= PA_SINK_FLAT_VOLUME;
433
434 s->thread_info.soft_volume = s->soft_volume;
435 s->thread_info.soft_muted = s->muted;
436
437 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
438 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
439 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->fixed_latency != 0));
440 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
441 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
442
443 pa_assert(s->monitor_source->fixed_latency == s->fixed_latency);
444 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
445 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
446
447 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
448
449 pa_source_put(s->monitor_source);
450
451 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
452 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
453 }
454
455 /* Called from main context */
456 void pa_sink_unlink(pa_sink* s) {
457 pa_bool_t linked;
458 pa_sink_input *i, *j = NULL;
459
460 pa_assert(s);
461
462 /* Please note that pa_sink_unlink() does more than simply
463 * reversing pa_sink_put(). It also undoes the registrations
464 * already done in pa_sink_new()! */
465
466 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
467 * may be called multiple times on the same sink without bad
468 * effects. */
469
470 linked = PA_SINK_IS_LINKED(s->state);
471
472 if (linked)
473 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
474
475 if (s->state != PA_SINK_UNLINKED)
476 pa_namereg_unregister(s->core, s->name);
477 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
478
479 if (s->card)
480 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
481
482 while ((i = pa_idxset_first(s->inputs, NULL))) {
483 pa_assert(i != j);
484 pa_sink_input_kill(i);
485 j = i;
486 }
487
488 if (linked)
489 sink_set_state(s, PA_SINK_UNLINKED);
490 else
491 s->state = PA_SINK_UNLINKED;
492
493 reset_callbacks(s);
494
495 if (s->monitor_source)
496 pa_source_unlink(s->monitor_source);
497
498 if (linked) {
499 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
500 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
501 }
502 }
503
504 /* Called from main context */
505 static void sink_free(pa_object *o) {
506 pa_sink *s = PA_SINK(o);
507 pa_sink_input *i;
508
509 pa_assert(s);
510 pa_assert(pa_sink_refcnt(s) == 0);
511
512 if (PA_SINK_IS_LINKED(s->state))
513 pa_sink_unlink(s);
514
515 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
516
517 if (s->monitor_source) {
518 pa_source_unref(s->monitor_source);
519 s->monitor_source = NULL;
520 }
521
522 pa_idxset_free(s->inputs, NULL, NULL);
523
524 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
525 pa_sink_input_unref(i);
526
527 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
528
529 if (s->silence.memblock)
530 pa_memblock_unref(s->silence.memblock);
531
532 pa_xfree(s->name);
533 pa_xfree(s->driver);
534
535 if (s->proplist)
536 pa_proplist_free(s->proplist);
537
538 if (s->ports) {
539 pa_device_port *p;
540
541 while ((p = pa_hashmap_steal_first(s->ports)))
542 pa_device_port_free(p);
543
544 pa_hashmap_free(s->ports, NULL, NULL);
545 }
546
547 pa_xfree(s);
548 }
549
550 /* Called from main context */
551 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
552 pa_sink_assert_ref(s);
553
554 s->asyncmsgq = q;
555
556 if (s->monitor_source)
557 pa_source_set_asyncmsgq(s->monitor_source, q);
558 }
559
560 /* Called from main context */
561 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
562 pa_sink_assert_ref(s);
563
564 s->rtpoll = p;
565
566 if (s->monitor_source)
567 pa_source_set_rtpoll(s->monitor_source, p);
568 }
569
570 /* Called from main context */
571 int pa_sink_update_status(pa_sink*s) {
572 pa_sink_assert_ref(s);
573 pa_assert(PA_SINK_IS_LINKED(s->state));
574
575 if (s->state == PA_SINK_SUSPENDED)
576 return 0;
577
578 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
579 }
580
581 /* Called from main context */
582 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
583 pa_sink_assert_ref(s);
584 pa_assert(PA_SINK_IS_LINKED(s->state));
585 pa_assert(cause != 0);
586
587 if (suspend) {
588 s->suspend_cause |= cause;
589 s->monitor_source->suspend_cause |= cause;
590 } else {
591 s->suspend_cause &= ~cause;
592 s->monitor_source->suspend_cause &= ~cause;
593 }
594
595 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
596 return 0;
597
598 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
599
600 if (s->suspend_cause)
601 return sink_set_state(s, PA_SINK_SUSPENDED);
602 else
603 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
604 }
605
606 /* Called from main context */
607 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
608 pa_sink_input *i, *n;
609 uint32_t idx;
610
611 pa_sink_assert_ref(s);
612 pa_assert(PA_SINK_IS_LINKED(s->state));
613
614 if (!q)
615 q = pa_queue_new();
616
617 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
618 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
619
620 pa_sink_input_ref(i);
621
622 if (pa_sink_input_start_move(i) >= 0)
623 pa_queue_push(q, i);
624 else
625 pa_sink_input_unref(i);
626 }
627
628 return q;
629 }
630
631 /* Called from main context */
632 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
633 pa_sink_input *i;
634
635 pa_sink_assert_ref(s);
636 pa_assert(PA_SINK_IS_LINKED(s->state));
637 pa_assert(q);
638
639 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
640 if (pa_sink_input_finish_move(i, s, save) < 0)
641 pa_sink_input_kill(i);
642
643 pa_sink_input_unref(i);
644 }
645
646 pa_queue_free(q, NULL, NULL);
647 }
648
649 /* Called from main context */
650 void pa_sink_move_all_fail(pa_queue *q) {
651 pa_sink_input *i;
652 pa_assert(q);
653
654 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
655 if (pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_MOVE_FAIL], i) == PA_HOOK_OK) {
656 pa_sink_input_kill(i);
657 pa_sink_input_unref(i);
658 }
659 }
660
661 pa_queue_free(q, NULL, NULL);
662 }
663
664 /* Called from IO thread context */
665 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
666 pa_sink_input *i;
667 void *state = NULL;
668
669 pa_sink_assert_ref(s);
670 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
671
672 /* If nobody requested this and this is not actually a real rewind,
673 * then we can short-cut it. Please note that this means that
674 * rewind requests triggered upstream will not always be
675 * translated into actual rewinds! */
676 if (!s->thread_info.rewind_requested && nbytes <= 0)
677 return;
678
679 s->thread_info.rewind_nbytes = 0;
680 s->thread_info.rewind_requested = FALSE;
681
682 if (s->thread_info.state == PA_SINK_SUSPENDED)
683 return;
684
685 if (nbytes > 0)
686 pa_log_debug("Processing rewind...");
687
688 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
689 pa_sink_input_assert_ref(i);
690 pa_sink_input_process_rewind(i, nbytes);
691 }
692
693 if (nbytes > 0)
694 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
695 pa_source_process_rewind(s->monitor_source, nbytes);
696 }
697
698 /* Called from IO thread context */
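/* Peeks one chunk from each sink input (up to maxinfo entries), skipping
 * chunks that are pure silence, and shrinks *length to the shortest chunk
 * seen so that all inputs can be mixed over the same span. Each filled
 * pa_mix_info entry keeps a reference to its chunk and its sink input;
 * those references are released later in inputs_drop(). Returns the number
 * of entries filled in. */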
699 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
700 pa_sink_input *i;
701 unsigned n = 0;
702 void *state = NULL;
703 size_t mixlength = *length;
704
705 pa_sink_assert_ref(s);
706 pa_assert(info);
707
708 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
709 pa_sink_input_assert_ref(i);
710
711 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
712
713 if (mixlength == 0 || info->chunk.length < mixlength)
714 mixlength = info->chunk.length;
715
716 if (pa_memblock_is_silence(info->chunk.memblock)) {
717 pa_memblock_unref(info->chunk.memblock);
718 continue;
719 }
720
721 info->userdata = pa_sink_input_ref(i);
722
723 pa_assert(info->chunk.memblock);
724 pa_assert(info->chunk.length > 0);
725
726 info++;
727 n++;
728 maxinfo--;
729 }
730
731 if (mixlength > 0)
732 *length = mixlength;
733
734 return n;
735 }
736
737 /* Called from IO thread context */
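/* Advances every connected sink input by result->length bytes, forwards the
 * per-input audio (or silence) to any direct outputs hanging off the monitor
 * source, drops the references that fill_mix_info() acquired, and finally
 * posts the mixed result to the monitor source. */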
738 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
739 pa_sink_input *i;
740 void *state = NULL;
741 unsigned p = 0;
742 unsigned n_unreffed = 0;
743
744 pa_sink_assert_ref(s);
745 pa_assert(result);
746 pa_assert(result->memblock);
747 pa_assert(result->length > 0);
748
749 /* We optimize for the case where the order of the inputs has not changed */
750
751 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
752 unsigned j;
753 pa_mix_info* m = NULL;
754
755 pa_sink_input_assert_ref(i);
756
757 /* Let's try to find the matching entry in the pa_mix_info array */
758 for (j = 0; j < n; j ++) {
759
760 if (info[p].userdata == i) {
761 m = info + p;
762 break;
763 }
764
765 p++;
766 if (p >= n)
767 p = 0;
768 }
769
770 /* Drop read data */
771 pa_sink_input_drop(i, result->length);
772
773 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
774
775 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
776 void *ostate = NULL;
777 pa_source_output *o;
778 pa_memchunk c;
779
780 if (m && m->chunk.memblock) {
781 c = m->chunk;
782 pa_memblock_ref(c.memblock);
783 pa_assert(result->length <= c.length);
784 c.length = result->length;
785
786 pa_memchunk_make_writable(&c, 0);
787 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
788 } else {
789 c = s->silence;
790 pa_memblock_ref(c.memblock);
791 pa_assert(result->length <= c.length);
792 c.length = result->length;
793 }
794
795 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
796 pa_source_output_assert_ref(o);
797 pa_assert(o->direct_on_input == i);
798 pa_source_post_direct(s->monitor_source, o, &c);
799 }
800
801 pa_memblock_unref(c.memblock);
802 }
803 }
804
805 if (m) {
806 if (m->chunk.memblock)
807 pa_memblock_unref(m->chunk.memblock);
808 pa_memchunk_reset(&m->chunk);
809
810 pa_sink_input_unref(m->userdata);
811 m->userdata = NULL;
812
813 n_unreffed += 1;
814 }
815 }
816
817 /* Now drop references to entries that are included in the
818 * pa_mix_info array but don't exist anymore */
819
820 if (n_unreffed < n) {
821 for (; n > 0; info++, n--) {
822 if (info->userdata)
823 pa_sink_input_unref(info->userdata);
824 if (info->chunk.memblock)
825 pa_memblock_unref(info->chunk.memblock);
826 }
827 }
828
829 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
830 pa_source_post(s->monitor_source, result);
831 }
832
833 /* Called from IO thread context */
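/* Renders up to 'length' bytes of audio. Three cases: with no active input
 * the cached silence block is returned by reference, with exactly one input
 * its chunk is reused without copying (made writable only if soft volume or
 * mute actually has to be applied), and with several inputs a fresh memblock
 * is allocated and everything is mixed into it with pa_mix(). A suspended
 * sink just hands out a reference to the silence chunk. */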
834 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
835 pa_mix_info info[MAX_MIX_CHANNELS];
836 unsigned n;
837 size_t block_size_max;
838
839 pa_sink_assert_ref(s);
840 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
841 pa_assert(pa_frame_aligned(length, &s->sample_spec));
842 pa_assert(result);
843
844 pa_sink_ref(s);
845
846 pa_assert(!s->thread_info.rewind_requested);
847 pa_assert(s->thread_info.rewind_nbytes == 0);
848
849 if (s->thread_info.state == PA_SINK_SUSPENDED) {
850 result->memblock = pa_memblock_ref(s->silence.memblock);
851 result->index = s->silence.index;
852 result->length = PA_MIN(s->silence.length, length);
853 return;
854 }
855
856 if (length <= 0)
857 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
858
859 block_size_max = pa_mempool_block_size_max(s->core->mempool);
860 if (length > block_size_max)
861 length = pa_frame_align(block_size_max, &s->sample_spec);
862
863 pa_assert(length > 0);
864
865 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
866
867 if (n == 0) {
868
869 *result = s->silence;
870 pa_memblock_ref(result->memblock);
871
872 if (result->length > length)
873 result->length = length;
874
875 } else if (n == 1) {
876 pa_cvolume volume;
877
878 *result = info[0].chunk;
879 pa_memblock_ref(result->memblock);
880
881 if (result->length > length)
882 result->length = length;
883
884 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
885
886 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
887 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
888 pa_memblock_unref(result->memblock);
889 pa_silence_memchunk_get(&s->core->silence_cache,
890 s->core->mempool,
891 result,
892 &s->sample_spec,
893 result->length);
894 } else {
895 pa_memchunk_make_writable(result, 0);
896 pa_volume_memchunk(result, &s->sample_spec, &volume);
897 }
898 }
899 } else {
900 void *ptr;
901 result->memblock = pa_memblock_new(s->core->mempool, length);
902
903 ptr = pa_memblock_acquire(result->memblock);
904 result->length = pa_mix(info, n,
905 ptr, length,
906 &s->sample_spec,
907 &s->thread_info.soft_volume,
908 s->thread_info.soft_muted);
909 pa_memblock_release(result->memblock);
910
911 result->index = 0;
912 }
913
914 inputs_drop(s, info, n, result);
915
916 pa_sink_unref(s);
917 }
918
919 /* Called from IO thread context */
920 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
921 pa_mix_info info[MAX_MIX_CHANNELS];
922 unsigned n;
923 size_t length, block_size_max;
924
925 pa_sink_assert_ref(s);
926 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
927 pa_assert(target);
928 pa_assert(target->memblock);
929 pa_assert(target->length > 0);
930 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
931
932 pa_sink_ref(s);
933
934 pa_assert(!s->thread_info.rewind_requested);
935 pa_assert(s->thread_info.rewind_nbytes == 0);
936
937 if (s->thread_info.state == PA_SINK_SUSPENDED) {
938 pa_silence_memchunk(target, &s->sample_spec);
939 return;
940 }
941
942 length = target->length;
943 block_size_max = pa_mempool_block_size_max(s->core->mempool);
944 if (length > block_size_max)
945 length = pa_frame_align(block_size_max, &s->sample_spec);
946
947 pa_assert(length > 0);
948
949 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
950
951 if (n == 0) {
952 if (target->length > length)
953 target->length = length;
954
955 pa_silence_memchunk(target, &s->sample_spec);
956 } else if (n == 1) {
957 pa_cvolume volume;
958
959 if (target->length > length)
960 target->length = length;
961
962 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
963
964 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
965 pa_silence_memchunk(target, &s->sample_spec);
966 else {
967 pa_memchunk vchunk;
968
969 vchunk = info[0].chunk;
970 pa_memblock_ref(vchunk.memblock);
971
972 if (vchunk.length > length)
973 vchunk.length = length;
974
975 if (!pa_cvolume_is_norm(&volume)) {
976 pa_memchunk_make_writable(&vchunk, 0);
977 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
978 }
979
980 pa_memchunk_memcpy(target, &vchunk);
981 pa_memblock_unref(vchunk.memblock);
982 }
983
984 } else {
985 void *ptr;
986
987 ptr = pa_memblock_acquire(target->memblock);
988
989 target->length = pa_mix(info, n,
990 (uint8_t*) ptr + target->index, length,
991 &s->sample_spec,
992 &s->thread_info.soft_volume,
993 s->thread_info.soft_muted);
994
995 pa_memblock_release(target->memblock);
996 }
997
998 inputs_drop(s, info, n, target);
999
1000 pa_sink_unref(s);
1001 }
1002
1003 /* Called from IO thread context */
1004 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1005 pa_memchunk chunk;
1006 size_t l, d;
1007
1008 pa_sink_assert_ref(s);
1009 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1010 pa_assert(target);
1011 pa_assert(target->memblock);
1012 pa_assert(target->length > 0);
1013 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1014
1015 pa_sink_ref(s);
1016
1017 pa_assert(!s->thread_info.rewind_requested);
1018 pa_assert(s->thread_info.rewind_nbytes == 0);
1019
1020 l = target->length;
1021 d = 0;
1022 while (l > 0) {
1023 chunk = *target;
1024 chunk.index += d;
1025 chunk.length -= d;
1026
1027 pa_sink_render_into(s, &chunk);
1028
1029 d += chunk.length;
1030 l -= chunk.length;
1031 }
1032
1033 pa_sink_unref(s);
1034 }
1035
1036 /* Called from IO thread context */
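/* Unlike pa_sink_render(), this always returns exactly 'length' bytes: the
 * first mixing pass may come up short (it is bounded by the shortest input
 * chunk), in which case the result is made writable and the remaining tail
 * is filled by repeated pa_sink_render_into() calls. */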
1037 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1038 pa_mix_info info[MAX_MIX_CHANNELS];
1039 size_t length1st = length;
1040 unsigned n;
1041
1042 pa_sink_assert_ref(s);
1043 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1044 pa_assert(length > 0);
1045 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1046 pa_assert(result);
1047
1048 pa_sink_ref(s);
1049
1050 pa_assert(!s->thread_info.rewind_requested);
1051 pa_assert(s->thread_info.rewind_nbytes == 0);
1052
1053 pa_assert(length > 0);
1054
1055 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
1056
1057 if (n == 0) {
1058 pa_silence_memchunk_get(&s->core->silence_cache,
1059 s->core->mempool,
1060 result,
1061 &s->sample_spec,
1062 length1st);
1063 } else if (n == 1) {
1064 pa_cvolume volume;
1065
1066 *result = info[0].chunk;
1067 pa_memblock_ref(result->memblock);
1068
1069 if (result->length > length)
1070 result->length = length;
1071
1072 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1073
1074 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
1075 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1076 pa_memblock_unref(result->memblock);
1077 pa_silence_memchunk_get(&s->core->silence_cache,
1078 s->core->mempool,
1079 result,
1080 &s->sample_spec,
1081 result->length);
1082 } else {
1083 pa_memchunk_make_writable(result, length);
1084 pa_volume_memchunk(result, &s->sample_spec, &volume);
1085 }
1086 }
1087 } else {
1088 void *ptr;
1089
1090 result->index = 0;
1091 result->memblock = pa_memblock_new(s->core->mempool, length);
1092
1093 ptr = pa_memblock_acquire(result->memblock);
1094
1095 result->length = pa_mix(info, n,
1096 (uint8_t*) ptr + result->index, length1st,
1097 &s->sample_spec,
1098 &s->thread_info.soft_volume,
1099 s->thread_info.soft_muted);
1100
1101 pa_memblock_release(result->memblock);
1102 }
1103
1104 inputs_drop(s, info, n, result);
1105
1106 if (result->length < length) {
1107 pa_memchunk chunk;
1108 size_t l, d;
1109 pa_memchunk_make_writable(result, length);
1110
1111 l = length - result->length;
1112 d = result->index + result->length;
1113 while (l > 0) {
1114 chunk = *result;
1115 chunk.index = d;
1116 chunk.length = l;
1117
1118 pa_sink_render_into(s, &chunk);
1119
1120 d += chunk.length;
1121 l -= chunk.length;
1122 }
1123 result->length = length;
1124 }
1125
1126 pa_sink_unref(s);
1127 }
1128
1129 /* Called from main thread */
1130 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1131 pa_usec_t usec = 0;
1132
1133 pa_sink_assert_ref(s);
1134 pa_assert(PA_SINK_IS_LINKED(s->state));
1135
1136 /* The returned value is supposed to be in the time domain of the sound card! */
1137
1138 if (s->state == PA_SINK_SUSPENDED)
1139 return 0;
1140
1141 if (!(s->flags & PA_SINK_LATENCY))
1142 return 0;
1143
1144 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1145
1146 return usec;
1147 }
1148
1149 /* Called from IO thread */
1150 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1151 pa_usec_t usec = 0;
1152 pa_msgobject *o;
1153
1154 pa_sink_assert_ref(s);
1155 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1156
1157 /* The returned value is supposed to be in the time domain of the sound card! */
1158
1159 if (s->thread_info.state == PA_SINK_SUSPENDED)
1160 return 0;
1161
1162 if (!(s->flags & PA_SINK_LATENCY))
1163 return 0;
1164
1165 o = PA_MSGOBJECT(s);
1166
1167 /* We probably should make this a proper vtable callback instead of going through process_msg() */
1168
1169 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1170 return -1;
1171
1172 return usec;
1173 }
1174
1175 static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
1176 unsigned c;
1177
1178 pa_sink_input_assert_ref(i);
1179 pa_assert(new_volume->channels == i->sample_spec.channels);
1180
1181 /*
1182 * This basically calculates:
1183 *
1184 * i->relative_volume := i->virtual_volume / new_volume
1185 * i->soft_volume := i->relative_volume * i->volume_factor
1186 */
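/* For instance (single channel, linear factors as returned by
 * pa_sw_volume_to_linear()): if the stream's virtual volume maps to 0.5 and
 * the new flat sink volume maps to 1.0, relative_volume becomes 0.5; with a
 * volume_factor of 1.0 the stream's soft_volume therefore ends up at a
 * linear 0.5, i.e. the attenuation is applied in software on top of the
 * louder sink volume. */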
1187
1188 /* The new sink volume passed in here must already be remapped to
1189 * the sink input's channel map! */
1190
1191 i->soft_volume.channels = i->sample_spec.channels;
1192
1193 for (c = 0; c < i->sample_spec.channels; c++)
1194
1195 if (new_volume->values[c] <= PA_VOLUME_MUTED)
1196 /* We leave i->relative_volume untouched */
1197 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1198 else {
1199 i->relative_volume[c] =
1200 pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
1201 pa_sw_volume_to_linear(new_volume->values[c]);
1202
1203 i->soft_volume.values[c] = pa_sw_volume_from_linear(
1204 i->relative_volume[c] *
1205 pa_sw_volume_to_linear(i->volume_factor.values[c]));
1206 }
1207
1208 /* Hooks have the ability to play games with i->soft_volume */
1209 pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);
1210
1211 /* We don't copy the soft_volume to the thread_info data
1212 * here. That must be done by the caller */
1213 }
1214
1215 /* Called from main thread */
1216 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1217 pa_sink_input *i;
1218 uint32_t idx;
1219
1220 pa_sink_assert_ref(s);
1221 pa_assert(new_volume);
1222 pa_assert(PA_SINK_IS_LINKED(s->state));
1223 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1224
1225 /* This is called whenever a sink input volume changes or a sink
1226 * input is added/removed and we might need to fix up the sink
1227 * volume accordingly. Please note that we don't actually update
1228 * the sink's volume here; we only return how it needs to be
1229 * updated. The caller should then call pa_sink_set_volume(). */
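/* A sketch of the typical caller pattern (the propagate/sendmsg/
 * become_reference/save flags depend on the caller; this is illustrative
 * only):
 *
 *     pa_cvolume new_volume;
 *
 *     pa_sink_update_flat_volume(s, &new_volume);
 *     pa_sink_set_volume(s, &new_volume, FALSE, TRUE, FALSE, TRUE);
 */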
1230
1231 if (pa_idxset_isempty(s->inputs)) {
1232 /* In the special case that we have no sink input we leave the
1233 * volume unmodified. */
1234 *new_volume = s->reference_volume;
1235 return;
1236 }
1237
1238 pa_cvolume_mute(new_volume, s->channel_map.channels);
1239
1240 /* First let's determine the new maximum volume of all inputs
1241 * connected to this sink */
1242 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1243 unsigned c;
1244 pa_cvolume remapped_volume;
1245
1246 remapped_volume = i->virtual_volume;
1247 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1248
1249 for (c = 0; c < new_volume->channels; c++)
1250 if (remapped_volume.values[c] > new_volume->values[c])
1251 new_volume->values[c] = remapped_volume.values[c];
1252 }
1253
1254 /* Then, let's update the soft volumes of all inputs connected
1255 * to this sink */
1256 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1257 pa_cvolume remapped_new_volume;
1258
1259 remapped_new_volume = *new_volume;
1260 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1261 compute_new_soft_volume(i, &remapped_new_volume);
1262
1263 /* We don't copy soft_volume to the thread_info data here
1264 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1265 * want the update to be atomic with the sink volume
1266 * update, hence we do it within the pa_sink_set_volume() call
1267 * below */
1268 }
1269 }
1270
1271 /* Called from main thread */
1272 void pa_sink_propagate_flat_volume(pa_sink *s) {
1273 pa_sink_input *i;
1274 uint32_t idx;
1275
1276 pa_sink_assert_ref(s);
1277 pa_assert(PA_SINK_IS_LINKED(s->state));
1278 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1279
1280 /* This is called whenever the sink volume changes that is not
1281 * caused by a sink input volume change. We need to fix up the
1282 * sink input volumes accordingly */
1283
1284 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1285 pa_cvolume sink_volume, new_virtual_volume;
1286 unsigned c;
1287
1288 /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */
1289
1290 sink_volume = s->virtual_volume;
1291 pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);
1292
1293 for (c = 0; c < i->sample_spec.channels; c++)
1294 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1295 i->relative_volume[c] *
1296 pa_sw_volume_to_linear(sink_volume.values[c]));
1297
1298 new_virtual_volume.channels = i->sample_spec.channels;
1299
1300 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1301 i->virtual_volume = new_virtual_volume;
1302
1303 /* Hmm, the soft volume might no longer actually match
1304 * what has been chosen as new virtual volume here,
1305 * especially when the old volume was
1306 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1307 * volumes here. */
1308 compute_new_soft_volume(i, &sink_volume);
1309
1310 /* The virtual volume changed, let's tell people so */
1311 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1312 }
1313 }
1314
1315 /* If the soft_volume of any of the sink inputs got changed, let's
1316 * make sure the thread copies are synced up. */
1317 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
1318 }
1319
1320 /* Called from main thread */
1321 void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference, pa_bool_t save) {
1322 pa_bool_t virtual_volume_changed;
1323
1324 pa_sink_assert_ref(s);
1325 pa_assert(PA_SINK_IS_LINKED(s->state));
1326 pa_assert(volume);
1327 pa_assert(pa_cvolume_valid(volume));
1328 pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));
1329
1330 virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
1331 s->virtual_volume = *volume;
1332 s->save_volume = (!virtual_volume_changed && s->save_volume) || save;
1333
1334 if (become_reference)
1335 s->reference_volume = s->virtual_volume;
1336
1337 /* Propagate this volume change back to the inputs */
1338 if (virtual_volume_changed)
1339 if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
1340 pa_sink_propagate_flat_volume(s);
1341
1342 if (s->set_volume) {
1343 /* If we have a function set_volume(), then we do not apply a
1344 * soft volume by default. However, set_volume() is free to
1345 * apply one to s->soft_volume */
1346
1347 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1348 s->set_volume(s);
1349
1350 } else
1351 /* If we have no function set_volume(), then the soft volume
1352 * becomes the virtual volume */
1353 s->soft_volume = s->virtual_volume;
1354
1355 /* This tells the sink that soft and/or virtual volume changed */
1356 if (sendmsg)
1357 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1358
1359 if (virtual_volume_changed)
1360 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1361 }
1362
1363 /* Called from main thread. Only to be called by sink implementor */
1364 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1365 pa_sink_assert_ref(s);
1366 pa_assert(volume);
1367
1368 s->soft_volume = *volume;
1369
1370 if (PA_SINK_IS_LINKED(s->state))
1371 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1372 else
1373 s->thread_info.soft_volume = *volume;
1374 }
1375
1376 /* Called from main thread */
1377 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
1378 pa_sink_assert_ref(s);
1379
1380 if (s->refresh_volume || force_refresh) {
1381 struct pa_cvolume old_virtual_volume = s->virtual_volume;
1382
1383 if (s->get_volume)
1384 s->get_volume(s);
1385
1386 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1387
1388 if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {
1389
1390 s->reference_volume = s->virtual_volume;
1391
1392 /* Something got changed in the hardware. It probably
1393 * makes sense to save changed hw settings given that hw
1394 * volume changes not triggered by PA are almost certainly
1395 * done by the user. */
1396 s->save_volume = TRUE;
1397
1398 if (s->flags & PA_SINK_FLAT_VOLUME)
1399 pa_sink_propagate_flat_volume(s);
1400
1401 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1402 }
1403 }
1404
1405 return reference ? &s->reference_volume : &s->virtual_volume;
1406 }
1407
1408 /* Called from main thread */
1409 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume) {
1410 pa_sink_assert_ref(s);
1411
1412 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1413 if (pa_cvolume_equal(&s->virtual_volume, new_volume))
1414 return;
1415
1416 s->reference_volume = s->virtual_volume = *new_volume;
1417 s->save_volume = TRUE;
1418
1419 if (s->flags & PA_SINK_FLAT_VOLUME)
1420 pa_sink_propagate_flat_volume(s);
1421
1422 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1423 }
1424
1425 /* Called from main thread */
1426 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1427 pa_bool_t old_muted;
1428
1429 pa_sink_assert_ref(s);
1430 pa_assert(PA_SINK_IS_LINKED(s->state));
1431
1432 old_muted = s->muted;
1433 s->muted = mute;
1434 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1435
1436 if (s->set_mute)
1437 s->set_mute(s);
1438
1439 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1440
1441 if (old_muted != s->muted)
1442 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1443 }
1444
1445 /* Called from main thread */
1446 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1447
1448 pa_sink_assert_ref(s);
1449
1450 if (s->refresh_muted || force_refresh) {
1451 pa_bool_t old_muted = s->muted;
1452
1453 if (s->get_mute)
1454 s->get_mute(s);
1455
1456 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1457
1458 if (old_muted != s->muted) {
1459 s->save_muted = TRUE;
1460
1461 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1462
1463 /* Make sure the soft mute status stays in sync */
1464 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1465 }
1466 }
1467
1468
1469 return s->muted;
1470 }
1471
1472 /* Called from main thread */
1473 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1474 pa_sink_assert_ref(s);
1475
1476 /* The sink implementor may call this if the mute status changed to make sure everyone is notified */
1477
1478 if (s->muted == new_muted)
1479 return;
1480
1481 s->muted = new_muted;
1482 s->save_muted = TRUE;
1483
1484 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1485 }
1486
1487 /* Called from main thread */
1488 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1489 pa_sink_assert_ref(s);
1490
1491 if (p)
1492 pa_proplist_update(s->proplist, mode, p);
1493
1494 if (PA_SINK_IS_LINKED(s->state)) {
1495 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1496 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1497 }
1498
1499 return TRUE;
1500 }
1501
1502 /* Called from main thread */
1503 void pa_sink_set_description(pa_sink *s, const char *description) {
1504 const char *old;
1505 pa_sink_assert_ref(s);
1506
1507 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1508 return;
1509
1510 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1511
1512 if (old && description && !strcmp(old, description))
1513 return;
1514
1515 if (description)
1516 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1517 else
1518 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1519
1520 if (s->monitor_source) {
1521 char *n;
1522
1523 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1524 pa_source_set_description(s->monitor_source, n);
1525 pa_xfree(n);
1526 }
1527
1528 if (PA_SINK_IS_LINKED(s->state)) {
1529 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1530 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1531 }
1532 }
1533
1534 /* Called from main thread */
1535 unsigned pa_sink_linked_by(pa_sink *s) {
1536 unsigned ret;
1537
1538 pa_sink_assert_ref(s);
1539 pa_assert(PA_SINK_IS_LINKED(s->state));
1540
1541 ret = pa_idxset_size(s->inputs);
1542
1543 /* We add in the number of streams connected to us here. Please
1544 * note the asymmetry to pa_sink_used_by()! */
1545
1546 if (s->monitor_source)
1547 ret += pa_source_linked_by(s->monitor_source);
1548
1549 return ret;
1550 }
1551
1552 /* Called from main thread */
1553 unsigned pa_sink_used_by(pa_sink *s) {
1554 unsigned ret;
1555
1556 pa_sink_assert_ref(s);
1557 pa_assert(PA_SINK_IS_LINKED(s->state));
1558
1559 ret = pa_idxset_size(s->inputs);
1560 pa_assert(ret >= s->n_corked);
1561
1562 /* Streams connected to our monitor source do not matter for
1563 * pa_sink_used_by()! */
1564
1565 return ret - s->n_corked;
1566 }
1567
1568 /* Called from main thread */
1569 unsigned pa_sink_check_suspend(pa_sink *s) {
1570 unsigned ret;
1571 pa_sink_input *i;
1572 uint32_t idx;
1573
1574 pa_sink_assert_ref(s);
1575
1576 if (!PA_SINK_IS_LINKED(s->state))
1577 return 0;
1578
1579 ret = 0;
1580
1581 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1582 pa_sink_input_state_t st;
1583
1584 st = pa_sink_input_get_state(i);
1585 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1586
1587 if (st == PA_SINK_INPUT_CORKED)
1588 continue;
1589
1590 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1591 continue;
1592
1593 ret ++;
1594 }
1595
1596 if (s->monitor_source)
1597 ret += pa_source_check_suspend(s->monitor_source);
1598
1599 return ret;
1600 }
1601
1602 /* Called from the IO thread */
1603 static void sync_input_volumes_within_thread(pa_sink *s) {
1604 pa_sink_input *i;
1605 void *state = NULL;
1606
1607 pa_sink_assert_ref(s);
1608
1609 while ((i = PA_SINK_INPUT(pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))) {
1610 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1611 continue;
1612
1613 i->thread_info.soft_volume = i->soft_volume;
1614 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1615 }
1616 }
1617
1618 /* Called from IO thread, except when it is not */
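/* Default handler for the PA_SINK_MESSAGE_* codes. It normally runs in the
 * sink's IO thread when a message arrives through the asyncmsgq, but a few
 * codes are also dispatched to it directly (e.g. from
 * pa_sink_get_latency_within_thread()). Sink implementations that install
 * their own process_msg() usually fall back to this function for every code
 * they do not handle themselves. */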
1619 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1620 pa_sink *s = PA_SINK(o);
1621 pa_sink_assert_ref(s);
1622
1623 switch ((pa_sink_message_t) code) {
1624
1625 case PA_SINK_MESSAGE_ADD_INPUT: {
1626 pa_sink_input *i = PA_SINK_INPUT(userdata);
1627
1628 /* If you change anything here, make sure to change the
1629 * sink input handling a few lines down at
1630 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1631
1632 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1633
1634 /* Since the caller sleeps in pa_sink_input_put(), we can
1635 * safely access data outside of thread_info even though
1636 * it is mutable */
1637
1638 if ((i->thread_info.sync_prev = i->sync_prev)) {
1639 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1640 pa_assert(i->sync_prev->sync_next == i);
1641 i->thread_info.sync_prev->thread_info.sync_next = i;
1642 }
1643
1644 if ((i->thread_info.sync_next = i->sync_next)) {
1645 pa_assert(i->sink == i->thread_info.sync_next->sink);
1646 pa_assert(i->sync_next->sync_prev == i);
1647 i->thread_info.sync_next->thread_info.sync_prev = i;
1648 }
1649
1650 pa_assert(!i->thread_info.attached);
1651 i->thread_info.attached = TRUE;
1652
1653 if (i->attach)
1654 i->attach(i);
1655
1656 pa_sink_input_set_state_within_thread(i, i->state);
1657
1658 /* The requested latency of the sink input needs to be
1659 * fixed up and then configured on the sink */
1660
1661 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1662 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1663
1664 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1665 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1666
1667 /* We don't rewind here automatically. This is left to the
1668 * sink input implementor because some sink inputs need a
1669 * slow start, i.e. need some time to buffer client
1670 * samples before beginning streaming. */
1671
1672 /* In flat volume mode we need to update the volume as
1673 * well */
1674 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1675 }
1676
1677 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1678 pa_sink_input *i = PA_SINK_INPUT(userdata);
1679
1680 /* If you change anything here, make sure to change the
1681 * sink input handling a few lines down at
1682 * PA_SINK_MESSAGE_START_MOVE, too. */
1683
1684 if (i->detach)
1685 i->detach(i);
1686
1687 pa_sink_input_set_state_within_thread(i, i->state);
1688
1689 pa_assert(i->thread_info.attached);
1690 i->thread_info.attached = FALSE;
1691
1692 /* Since the caller sleeps in pa_sink_input_unlink(),
1693 * we can safely access data outside of thread_info even
1694 * though it is mutable */
1695
1696 pa_assert(!i->sync_prev);
1697 pa_assert(!i->sync_next);
1698
1699 if (i->thread_info.sync_prev) {
1700 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1701 i->thread_info.sync_prev = NULL;
1702 }
1703
1704 if (i->thread_info.sync_next) {
1705 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1706 i->thread_info.sync_next = NULL;
1707 }
1708
1709 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1710 pa_sink_input_unref(i);
1711
1712 pa_sink_invalidate_requested_latency(s);
1713 pa_sink_request_rewind(s, (size_t) -1);
1714
1715 /* In flat volume mode we need to update the volume as
1716 * well */
1717 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1718 }
1719
1720 case PA_SINK_MESSAGE_START_MOVE: {
1721 pa_sink_input *i = PA_SINK_INPUT(userdata);
1722
1723 /* We don't support moving synchronized streams. */
1724 pa_assert(!i->sync_prev);
1725 pa_assert(!i->sync_next);
1726 pa_assert(!i->thread_info.sync_next);
1727 pa_assert(!i->thread_info.sync_prev);
1728
1729 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1730 pa_usec_t usec = 0;
1731 size_t sink_nbytes, total_nbytes;
1732
1733 /* Get the latency of the sink */
1734 if (!(s->flags & PA_SINK_LATENCY) ||
1735 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1736 usec = 0;
1737
1738 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1739 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1740
1741 if (total_nbytes > 0) {
1742 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1743 i->thread_info.rewrite_flush = TRUE;
1744 pa_sink_input_process_rewind(i, sink_nbytes);
1745 }
1746 }
1747
1748 if (i->detach)
1749 i->detach(i);
1750
1751 pa_assert(i->thread_info.attached);
1752 i->thread_info.attached = FALSE;
1753
1754 /* Let's remove the sink input ...*/
1755 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1756 pa_sink_input_unref(i);
1757
1758 pa_sink_invalidate_requested_latency(s);
1759
1760 pa_log_debug("Requesting rewind due to started move");
1761 pa_sink_request_rewind(s, (size_t) -1);
1762
1763 /* In flat volume mode we need to update the volume as
1764 * well */
1765 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1766 }
1767
1768 case PA_SINK_MESSAGE_FINISH_MOVE: {
1769 pa_sink_input *i = PA_SINK_INPUT(userdata);
1770
1771 /* We don't support moving synchronized streams. */
1772 pa_assert(!i->sync_prev);
1773 pa_assert(!i->sync_next);
1774 pa_assert(!i->thread_info.sync_next);
1775 pa_assert(!i->thread_info.sync_prev);
1776
1777 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1778
1779 pa_assert(!i->thread_info.attached);
1780 i->thread_info.attached = TRUE;
1781
1782 if (i->attach)
1783 i->attach(i);
1784
1785 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1786 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1787
1788 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1789 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1790
1791 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1792 pa_usec_t usec = 0;
1793 size_t nbytes;
1794
1795 /* Get the latency of the sink */
1796 if (!(s->flags & PA_SINK_LATENCY) ||
1797 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1798 usec = 0;
1799
1800 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1801
1802 if (nbytes > 0)
1803 pa_sink_input_drop(i, nbytes);
1804
1805 pa_log_debug("Requesting rewind due to finished move");
1806 pa_sink_request_rewind(s, nbytes);
1807 }
1808
1809 /* In flat volume mode we need to update the volume as
1810 * well */
1811 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1812 }
1813
1814 case PA_SINK_MESSAGE_SET_VOLUME:
1815
1816 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1817 s->thread_info.soft_volume = s->soft_volume;
1818 pa_sink_request_rewind(s, (size_t) -1);
1819 }
1820
1821 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1822 return 0;
1823
1824 /* Fall through ... */
1825
1826 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1827 sync_input_volumes_within_thread(s);
1828 return 0;
1829
1830 case PA_SINK_MESSAGE_GET_VOLUME:
1831 return 0;
1832
1833 case PA_SINK_MESSAGE_SET_MUTE:
1834
1835 if (s->thread_info.soft_muted != s->muted) {
1836 s->thread_info.soft_muted = s->muted;
1837 pa_sink_request_rewind(s, (size_t) -1);
1838 }
1839
1840 return 0;
1841
1842 case PA_SINK_MESSAGE_GET_MUTE:
1843 return 0;
1844
1845 case PA_SINK_MESSAGE_SET_STATE: {
1846
1847 pa_bool_t suspend_change =
1848 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1849 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1850
1851 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1852
1853 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1854 s->thread_info.rewind_nbytes = 0;
1855 s->thread_info.rewind_requested = FALSE;
1856 }
1857
1858 if (suspend_change) {
1859 pa_sink_input *i;
1860 void *state = NULL;
1861
1862 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1863 if (i->suspend_within_thread)
1864 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1865 }
1866
1867 return 0;
1868 }
1869
1870 case PA_SINK_MESSAGE_DETACH:
1871
1872 /* Detach all streams */
1873 pa_sink_detach_within_thread(s);
1874 return 0;
1875
1876 case PA_SINK_MESSAGE_ATTACH:
1877
1878 /* Reattach all streams */
1879 pa_sink_attach_within_thread(s);
1880 return 0;
1881
1882 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
1883
1884 pa_usec_t *usec = userdata;
1885 *usec = pa_sink_get_requested_latency_within_thread(s);
1886
1887 if (*usec == (pa_usec_t) -1)
1888 *usec = s->thread_info.max_latency;
1889
1890 return 0;
1891 }
1892
1893 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
1894 pa_usec_t *r = userdata;
1895
1896 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
1897
1898 return 0;
1899 }
1900
1901 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
1902 pa_usec_t *r = userdata;
1903
1904 r[0] = s->thread_info.min_latency;
1905 r[1] = s->thread_info.max_latency;
1906
1907 return 0;
1908 }
1909
1910 case PA_SINK_MESSAGE_GET_MAX_REWIND:
1911
1912 *((size_t*) userdata) = s->thread_info.max_rewind;
1913 return 0;
1914
1915 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
1916
1917 *((size_t*) userdata) = s->thread_info.max_request;
1918 return 0;
1919
1920 case PA_SINK_MESSAGE_SET_MAX_REWIND:
1921
1922 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
1923 return 0;
1924
1925 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
1926
1927 pa_sink_set_max_request_within_thread(s, (size_t) offset);
1928 return 0;
1929
1930 case PA_SINK_MESSAGE_GET_LATENCY:
1931 case PA_SINK_MESSAGE_MAX:
1932 ;
1933 }
1934
1935 return -1;
1936 }
1937
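/*
 * A minimal sketch of how a sink implementation typically uses the generic
 * handler above: it installs its own process_msg() callback, answers the
 * messages it knows better (e.g. GET_LATENCY) itself, and falls back to
 * pa_sink_process_msg() for everything else. struct userdata and
 * u->latency_estimate are hypothetical, module-specific names.
 *
 *     static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
 *         struct userdata *u = PA_SINK(o)->userdata;
 *
 *         switch (code) {
 *             case PA_SINK_MESSAGE_GET_LATENCY:
 *                 // Answer with the driver's own latency estimate
 *                 *((pa_usec_t*) data) = u->latency_estimate;
 *                 return 0;
 *         }
 *
 *         // Everything else is handled by the generic code above
 *         return pa_sink_process_msg(o, code, data, offset, chunk);
 *     }
 */
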
1938 /* Called from main thread */
1939 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
1940 pa_sink *sink;
1941 uint32_t idx;
1942 int ret = 0;
1943
1944 pa_core_assert_ref(c);
1945 pa_assert(cause != 0);
1946
1947 for (sink = PA_SINK(pa_idxset_first(c->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(c->sinks, &idx))) {
1948 int r;
1949
1950 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
1951 ret = r;
1952 }
1953
1954 return ret;
1955 }
1956
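/*
 * Minimal usage sketch: a policy module might suspend every sink while the
 * session is idle and resume them again later, e.g.:
 *
 *     pa_sink_suspend_all(c, TRUE, PA_SUSPEND_IDLE);
 *     ...
 *     pa_sink_suspend_all(c, FALSE, PA_SUSPEND_IDLE);
 */
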
1957 /* Called from main thread */
1958 void pa_sink_detach(pa_sink *s) {
1959 pa_sink_assert_ref(s);
1960 pa_assert(PA_SINK_IS_LINKED(s->state));
1961
1962 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
1963 }
1964
1965 /* Called from main thread */
1966 void pa_sink_attach(pa_sink *s) {
1967 pa_sink_assert_ref(s);
1968 pa_assert(PA_SINK_IS_LINKED(s->state));
1969
1970 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
1971 }
1972
1973 /* Called from IO thread */
1974 void pa_sink_detach_within_thread(pa_sink *s) {
1975 pa_sink_input *i;
1976 void *state = NULL;
1977
1978 pa_sink_assert_ref(s);
1979 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1980
1981 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1982 if (i->detach)
1983 i->detach(i);
1984
1985 if (s->monitor_source)
1986 pa_source_detach_within_thread(s->monitor_source);
1987 }
1988
1989 /* Called from IO thread */
1990 void pa_sink_attach_within_thread(pa_sink *s) {
1991 pa_sink_input *i;
1992 void *state = NULL;
1993
1994 pa_sink_assert_ref(s);
1995 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1996
1997 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1998 if (i->attach)
1999 i->attach(i);
2000
2001 if (s->monitor_source)
2002 pa_source_attach_within_thread(s->monitor_source);
2003 }
2004
2005 /* Called from IO thread */
2006 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2007 pa_sink_assert_ref(s);
2008 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2009
2010 if (s->thread_info.state == PA_SINK_SUSPENDED)
2011 return;
2012
2013 if (nbytes == (size_t) -1)
2014 nbytes = s->thread_info.max_rewind;
2015
2016 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2017
2018 if (s->thread_info.rewind_requested &&
2019 nbytes <= s->thread_info.rewind_nbytes)
2020 return;
2021
2022 s->thread_info.rewind_nbytes = nbytes;
2023 s->thread_info.rewind_requested = TRUE;
2024
2025 if (s->request_rewind)
2026 s->request_rewind(s);
2027 }
2028
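/*
 * Minimal usage sketch: when a driver reconfigures its playback path it can
 * ask for everything still rewindable to be rewritten by passing (size_t) -1,
 * which the code above clamps to max_rewind. u->sink is a hypothetical
 * module field.
 *
 *     // From the driver's IO thread, e.g. after a device parameter change:
 *     pa_sink_request_rewind(u->sink, (size_t) -1);
 */
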
2029 /* Called from IO thread */
2030 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2031 pa_usec_t result = (pa_usec_t) -1;
2032 pa_sink_input *i;
2033 void *state = NULL;
2034 pa_usec_t monitor_latency;
2035
2036 pa_sink_assert_ref(s);
2037
2038 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2039 return PA_CLAMP(s->fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2040
2041 if (s->thread_info.requested_latency_valid)
2042 return s->thread_info.requested_latency;
2043
2044 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2045
2046 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2047 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2048 result = i->thread_info.requested_sink_latency;
2049
2050 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2051
2052 if (monitor_latency != (pa_usec_t) -1 &&
2053 (result == (pa_usec_t) -1 || result > monitor_latency))
2054 result = monitor_latency;
2055
2056 if (result != (pa_usec_t) -1)
2057 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2058
2059 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2060 /* Only cache if properly initialized */
2061 s->thread_info.requested_latency = result;
2062 s->thread_info.requested_latency_valid = TRUE;
2063 }
2064
2065 return result;
2066 }
2067
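/*
 * Minimal sketch of how a driver's IO thread might use the aggregated value:
 * take the smallest latency any stream asked for, fall back to a
 * module-specific default when nothing was requested, and turn it into a
 * buffer size. u->sink and u->default_latency_usec are hypothetical fields.
 *
 *     pa_usec_t block_usec;
 *     size_t block_size;
 *
 *     block_usec = pa_sink_get_requested_latency_within_thread(u->sink);
 *     if (block_usec == (pa_usec_t) -1)
 *         block_usec = u->default_latency_usec;
 *
 *     block_size = pa_usec_to_bytes(block_usec, &u->sink->sample_spec);
 */
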
2068 /* Called from main thread */
2069 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2070 pa_usec_t usec = 0;
2071
2072 pa_sink_assert_ref(s);
2073 pa_assert(PA_SINK_IS_LINKED(s->state));
2074
2075 if (s->state == PA_SINK_SUSPENDED)
2076 return 0;
2077
2078 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2079 return usec;
2080 }
2081
2082 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2083 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2084 pa_sink_input *i;
2085 void *state = NULL;
2086
2087 pa_sink_assert_ref(s);
2088
2089 if (max_rewind == s->thread_info.max_rewind)
2090 return;
2091
2092 s->thread_info.max_rewind = max_rewind;
2093
2094 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2095 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2096 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2097 }
2098
2099 if (s->monitor_source)
2100 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2101 }
2102
2103 /* Called from main thread */
2104 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2105 pa_sink_assert_ref(s);
2106
2107 if (PA_SINK_IS_LINKED(s->state))
2108 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2109 else
2110 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2111 }
2112
2113 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2114 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2115 void *state = NULL;
2116
2117 pa_sink_assert_ref(s);
2118
2119 if (max_request == s->thread_info.max_request)
2120 return;
2121
2122 s->thread_info.max_request = max_request;
2123
2124 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2125 pa_sink_input *i;
2126
2127 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2128 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2129 }
2130 }
2131
2132 /* Called from main thread */
2133 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2134 pa_sink_assert_ref(s);
2135
2136 if (PA_SINK_IS_LINKED(s->state))
2137 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2138 else
2139 pa_sink_set_max_request_within_thread(s, max_request);
2140 }
2141
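/*
 * Minimal usage sketch: after the IO thread (re)configures its period and
 * buffer sizes it should let the core know, so that sink inputs can adapt
 * their render and rewind limits. u->block_size and u->buffer_size are
 * hypothetical module fields.
 *
 *     pa_sink_set_max_request_within_thread(u->sink, u->block_size);
 *     pa_sink_set_max_rewind_within_thread(u->sink, u->buffer_size);
 */
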
2142 /* Called from IO thread */
2143 void pa_sink_invalidate_requested_latency(pa_sink *s) {
2144 pa_sink_input *i;
2145 void *state = NULL;
2146
2147 pa_sink_assert_ref(s);
2148
2149 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2150 return;
2151
2152 s->thread_info.requested_latency_valid = FALSE;
2153
2154 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2155
2156 if (s->update_requested_latency)
2157 s->update_requested_latency(s);
2158
2159 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2160 if (i->update_sink_requested_latency)
2161 i->update_sink_requested_latency(i);
2162 }
2163 }
2164
2165 /* Called from main thread */
2166 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2167 pa_sink_assert_ref(s);
2168
2169 /* min_latency == 0: no limit
2170 * min_latency anything else: specified limit
2171 *
2172 * Similar for max_latency */
2173
2174 if (min_latency < ABSOLUTE_MIN_LATENCY)
2175 min_latency = ABSOLUTE_MIN_LATENCY;
2176
2177 if (max_latency <= 0 ||
2178 max_latency > ABSOLUTE_MAX_LATENCY)
2179 max_latency = ABSOLUTE_MAX_LATENCY;
2180
2181 pa_assert(min_latency <= max_latency);
2182
2183 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2184 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2185 max_latency == ABSOLUTE_MAX_LATENCY) ||
2186 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2187
2188 if (PA_SINK_IS_LINKED(s->state)) {
2189 pa_usec_t r[2];
2190
2191 r[0] = min_latency;
2192 r[1] = max_latency;
2193
2194 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2195 } else
2196 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2197 }
2198
2199 /* Called from main thread */
2200 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2201 pa_sink_assert_ref(s);
2202 pa_assert(min_latency);
2203 pa_assert(max_latency);
2204
2205 if (PA_SINK_IS_LINKED(s->state)) {
2206 pa_usec_t r[2] = { 0, 0 };
2207
2208 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2209
2210 *min_latency = r[0];
2211 *max_latency = r[1];
2212 } else {
2213 *min_latency = s->thread_info.min_latency;
2214 *max_latency = s->thread_info.max_latency;
2215 }
2216 }
2217
2218 /* Called from IO thread */
2219 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2220 void *state = NULL;
2221
2222 pa_sink_assert_ref(s);
2223
2224 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2225 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2226 pa_assert(min_latency <= max_latency);
2227
2228 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2229 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2230 max_latency == ABSOLUTE_MAX_LATENCY) ||
2231 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2232
2233 s->thread_info.min_latency = min_latency;
2234 s->thread_info.max_latency = max_latency;
2235
2236 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2237 pa_sink_input *i;
2238
2239 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2240 if (i->update_sink_latency_range)
2241 i->update_sink_latency_range(i);
2242 }
2243
2244 pa_sink_invalidate_requested_latency(s);
2245
2246 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2247 }
2248
2249 /* Called from main thread, before the sink is put */
2250 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2251 pa_sink_assert_ref(s);
2252
2253 pa_assert(pa_sink_get_state(s) == PA_SINK_INIT);
2254
2255 if (latency < ABSOLUTE_MIN_LATENCY)
2256 latency = ABSOLUTE_MIN_LATENCY;
2257
2258 if (latency > ABSOLUTE_MAX_LATENCY)
2259 latency = ABSOLUTE_MAX_LATENCY;
2260
2261 s->fixed_latency = latency;
2262 pa_source_set_fixed_latency(s->monitor_source, latency);
2263 }
2264
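/*
 * Minimal usage sketch: during initialization a sink that can adjust its
 * buffering at runtime declares a latency range, while a sink with a fixed
 * hardware buffer declares a single fixed latency before pa_sink_put().
 * u->sink and u->fragment_size are hypothetical module fields.
 *
 *     // Dynamic-latency capable sink (PA_SINK_DYNAMIC_LATENCY passed at pa_sink_new() time):
 *     pa_sink_set_latency_range(u->sink, 3 * PA_USEC_PER_MSEC, 100 * PA_USEC_PER_MSEC);
 *
 *     // Fixed-latency sink:
 *     pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->fragment_size, &u->sink->sample_spec));
 *
 *     pa_sink_put(u->sink);
 */
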
2265 /* Called from main context */
2266 size_t pa_sink_get_max_rewind(pa_sink *s) {
2267 size_t r;
2268 pa_sink_assert_ref(s);
2269
2270 if (!PA_SINK_IS_LINKED(s->state))
2271 return s->thread_info.max_rewind;
2272
2273 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2274
2275 return r;
2276 }
2277
2278 /* Called from main context */
2279 size_t pa_sink_get_max_request(pa_sink *s) {
2280 size_t r;
2281 pa_sink_assert_ref(s);
2282
2283 if (!PA_SINK_IS_LINKED(s->state))
2284 return s->thread_info.max_request;
2285
2286 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2287
2288 return r;
2289 }
2290
2291 /* Called from main context */
2292 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2293 pa_device_port *port;
2294
2295 pa_assert(s);
        pa_assert(name);
2296
2297 if (!s->set_port) {
2298 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2299 return -PA_ERR_NOTIMPLEMENTED;
2300 }
2301
2302 if (!s->ports)
2303 return -PA_ERR_NOENTITY;
2304
2305 if (!(port = pa_hashmap_get(s->ports, name)))
2306 return -PA_ERR_NOENTITY;
2307
2308 if (s->active_port == port) {
2309 s->save_port = s->save_port || save;
2310 return 0;
2311 }
2312
2313 if ((s->set_port(s, port)) < 0)
2314 return -PA_ERR_NOENTITY;
2315
2316 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2317
2318 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2319
2320 s->active_port = port;
2321 s->save_port = save;
2322
2323 return 0;
2324 }
2325
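/*
 * Minimal sketch of the implementation side of the port API used above: a
 * driver provides a set_port() callback that reroutes the hardware and
 * returns 0 on success; the function above then records the new active port
 * and posts the change event. sink_set_port_cb and struct userdata are
 * hypothetical names.
 *
 *     static int sink_set_port_cb(pa_sink *s, pa_device_port *p) {
 *         struct userdata *u = s->userdata;
 *
 *         // ... switch the hardware routing over to port p here ...
 *
 *         return 0;
 *     }
 *
 *     // At sink creation time:
 *     u->sink->set_port = sink_set_port_cb;
 */
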
2326 /* Called from main context */
2327 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2328 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2329
2330 pa_assert(p);
2331
2332 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2333 return TRUE;
2334
2335 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2336
2337 if (pa_streq(ff, "microphone"))
2338 t = "audio-input-microphone";
2339 else if (pa_streq(ff, "webcam"))
2340 t = "camera-web";
2341 else if (pa_streq(ff, "computer"))
2342 t = "computer";
2343 else if (pa_streq(ff, "handset"))
2344 t = "phone";
2345 else if (pa_streq(ff, "portable"))
2346 t = "multimedia-player";
2347 else if (pa_streq(ff, "tv"))
2348 t = "video-display";
2349
2350 /*
2351                  * The following icons are not part of the icon naming spec;
2352                  * see the discussion at:
2353 *
2354 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2355 */
2356 else if (pa_streq(ff, "headset"))
2357 t = "audio-headset";
2358 else if (pa_streq(ff, "headphone"))
2359 t = "audio-headphones";
2360 else if (pa_streq(ff, "speaker"))
2361 t = "audio-speakers";
2362 else if (pa_streq(ff, "hands-free"))
2363 t = "audio-handsfree";
2364 }
2365
2366 if (!t)
2367 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2368 if (pa_streq(c, "modem"))
2369 t = "modem";
2370
2371 if (!t) {
2372 if (is_sink)
2373 t = "audio-card";
2374 else
2375 t = "audio-input-microphone";
2376 }
2377
2378 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2379 if (strstr(profile, "analog"))
2380 s = "-analog";
2381 else if (strstr(profile, "iec958"))
2382 s = "-iec958";
2383 else if (strstr(profile, "hdmi"))
2384 s = "-hdmi";
2385 }
2386
2387 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2388
2389 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2390
2391 return TRUE;
2392 }
2393
2394 pa_bool_t pa_device_init_description(pa_proplist *p) {
2395 const char *s, *d = NULL, *k;
2396 pa_assert(p);
2397
2398 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2399 return TRUE;
2400
2401 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2402 if (pa_streq(s, "internal"))
2403 d = _("Internal Audio");
2404
2405 if (!d)
2406 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2407 if (pa_streq(s, "modem"))
2408 d = _("Modem");
2409
2410 if (!d)
2411 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2412
2413 if (!d)
2414 return FALSE;
2415
2416 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2417
2418 if (d && k)
2419 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2420 else if (d)
2421 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2422
2423 return TRUE;
2424 }
2425
2426 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2427 const char *s;
2428 pa_assert(p);
2429
2430 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2431 return TRUE;
2432
2433 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2434 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2435 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2436 return TRUE;
2437 }
2438
2439 return FALSE;
2440 }
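
/*
 * Minimal usage sketch for the three pa_device_init_*() helpers above: a
 * backend fills in the descriptive device properties it knows about and then
 * lets these helpers derive icon name, description and intended roles. The
 * property values shown are only examples.
 *
 *     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_FORM_FACTOR, "headset");
 *     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_PROFILE_NAME, "analog-stereo");
 *     pa_proplist_sets(data.proplist, PA_PROP_DEVICE_BUS, "usb");
 *
 *     pa_device_init_icon(data.proplist, TRUE);            // TRUE: this is a sink
 *     pa_device_init_description(data.proplist);
 *     pa_device_init_intended_roles(data.proplist);
 */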