Add volume ramping feature - sink modification
[pulseaudio] / src / pulsecore / sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
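/* MAX_MIX_CHANNELS bounds how many sink inputs are mixed in a single
 * pass and MIX_BUFFER_LENGTH is the default render size used when the
 * caller does not request a specific length; the latency constants
 * below are in microseconds (pa_usec_t). */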
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
54
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 memset(data, 0, sizeof(*data));
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
127
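/* 'extra' bytes are allocated past the (aligned) pa_device_port struct so
 * that the port implementor can keep private data in the same allocation. */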
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
149
150 /* Called from main context */
151 static void reset_callbacks(pa_sink *s) {
152 pa_assert(s);
153
154 s->set_state = NULL;
155 s->get_volume = NULL;
156 s->set_volume = NULL;
157 s->get_mute = NULL;
158 s->set_mute = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
161 s->set_port = NULL;
162 }
163
164 /* Called from main context */
165 pa_sink* pa_sink_new(
166 pa_core *core,
167 pa_sink_new_data *data,
168 pa_sink_flags_t flags) {
169
170 pa_sink *s;
171 const char *name;
172 char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
173 pa_source_new_data source_data;
174 const char *dn;
175 char *pt;
176
177 pa_assert(core);
178 pa_assert(data);
179 pa_assert(data->name);
180
181 s = pa_msgobject_new(pa_sink);
182
183 if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
184 pa_log_debug("Failed to register name %s.", data->name);
185 pa_xfree(s);
186 return NULL;
187 }
188
189 pa_sink_new_data_set_name(data, name);
190
191 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
192 pa_xfree(s);
193 pa_namereg_unregister(core, name);
194 return NULL;
195 }
196
197 /* FIXME, need to free s here on failure */
198
199 pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
200 pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
201
202 pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
203
204 if (!data->channel_map_is_set)
205 pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
206
207 pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
208 pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
209
210 if (!data->volume_is_set)
211 pa_cvolume_reset(&data->volume, data->sample_spec.channels);
212
213 pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
214 pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);
215
216 if (!data->muted_is_set)
217 data->muted = FALSE;
218
219 if (data->card)
220 pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
221
222 pa_device_init_description(data->proplist);
223 pa_device_init_icon(data->proplist, TRUE);
224 pa_device_init_intended_roles(data->proplist);
225
226 if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
227 pa_xfree(s);
228 pa_namereg_unregister(core, name);
229 return NULL;
230 }
231
232 s->parent.parent.free = sink_free;
233 s->parent.process_msg = pa_sink_process_msg;
234
235 s->core = core;
236 s->state = PA_SINK_INIT;
237 s->flags = flags;
238 s->suspend_cause = 0;
239 s->name = pa_xstrdup(name);
240 s->proplist = pa_proplist_copy(data->proplist);
241 s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
242 s->module = data->module;
243 s->card = data->card;
244
245 s->sample_spec = data->sample_spec;
246 s->channel_map = data->channel_map;
247
248 s->inputs = pa_idxset_new(NULL, NULL);
249 s->n_corked = 0;
250
251 s->reference_volume = s->virtual_volume = data->volume;
252 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
253 s->base_volume = PA_VOLUME_NORM;
254 s->n_volume_steps = PA_VOLUME_NORM+1;
255 s->muted = data->muted;
256 s->refresh_volume = s->refresh_muted = FALSE;
257
258 s->fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
259
260 reset_callbacks(s);
261 s->userdata = NULL;
262
263 s->asyncmsgq = NULL;
264 s->rtpoll = NULL;
265
266 /* As a minor optimization we just steal the list instead of
267 * copying it here */
268 s->ports = data->ports;
269 data->ports = NULL;
270
271 s->active_port = NULL;
272 s->save_port = FALSE;
273
274 if (data->active_port && s->ports)
275 if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
276 s->save_port = data->save_port;
277
278 if (!s->active_port && s->ports) {
279 void *state;
280 pa_device_port *p;
281
282 PA_HASHMAP_FOREACH(p, s->ports, state)
283 if (!s->active_port || p->priority > s->active_port->priority)
284 s->active_port = p;
285 }
286
287 s->save_volume = data->save_volume;
288 s->save_muted = data->save_muted;
289
290 pa_silence_memchunk_get(
291 &core->silence_cache,
292 core->mempool,
293 &s->silence,
294 &s->sample_spec,
295 0);
296
297 s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
298 s->thread_info.soft_volume = s->soft_volume;
299 s->thread_info.soft_muted = s->muted;
300 s->thread_info.state = s->state;
301 s->thread_info.rewind_nbytes = 0;
302 s->thread_info.rewind_requested = FALSE;
303 s->thread_info.max_rewind = 0;
304 s->thread_info.max_request = 0;
305 s->thread_info.requested_latency_valid = FALSE;
306 s->thread_info.requested_latency = 0;
307 s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
308 s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
309
310 pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
311
312 if (s->card)
313 pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
314
315 pt = pa_proplist_to_string_sep(s->proplist, "\n ");
316 pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
317 s->index,
318 s->name,
319 pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
320 pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
321 pt);
322 pa_xfree(pt);
323
324 pa_source_new_data_init(&source_data);
325 pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
326 pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
327 source_data.name = pa_sprintf_malloc("%s.monitor", name);
328 source_data.driver = data->driver;
329 source_data.module = data->module;
330 source_data.card = data->card;
331
332 dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
333 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
334 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
335
336 s->monitor_source = pa_source_new(core, &source_data,
337 ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
338 ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
339
340 pa_source_new_data_done(&source_data);
341
342 if (!s->monitor_source) {
343 pa_sink_unlink(s);
344 pa_sink_unref(s);
345 return NULL;
346 }
347
348 s->monitor_source->monitor_of = s;
349
350 pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
351 pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
352
353 return s;
354 }
355
356 /* Called from main context */
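/* A state change is applied in two steps: first the implementor's
 * set_state() callback, then a synchronous SET_STATE message to the IO
 * thread; if the message fails, the callback is rolled back to the
 * original state. Hooks and subscribers are notified afterwards (unless
 * we are entering UNLINKED), and on suspend/resume the sink inputs and
 * the monitor source are informed as well. */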
357 static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
358 int ret;
359 pa_bool_t suspend_change;
360 pa_sink_state_t original_state;
361
362 pa_assert(s);
363
364 if (s->state == state)
365 return 0;
366
367 original_state = s->state;
368
369 suspend_change =
370 (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
371 (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
372
373 if (s->set_state)
374 if ((ret = s->set_state(s, state)) < 0)
375 return ret;
376
377 if (s->asyncmsgq)
378 if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
379
380 if (s->set_state)
381 s->set_state(s, original_state);
382
383 return ret;
384 }
385
386 s->state = state;
387
388     if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
389 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
390 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
391 }
392
393 if (suspend_change) {
394 pa_sink_input *i;
395 uint32_t idx;
396
397 /* We're suspending or resuming, tell everyone about it */
398
399 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx)))
400 if (s->state == PA_SINK_SUSPENDED &&
401 (i->flags & PA_SINK_INPUT_FAIL_ON_SUSPEND))
402 pa_sink_input_kill(i);
403 else if (i->suspend)
404 i->suspend(i, state == PA_SINK_SUSPENDED);
405
406 if (s->monitor_source)
407 pa_source_sync_suspend(s->monitor_source);
408 }
409
410 return 0;
411 }
412
413 /* Called from main context */
414 void pa_sink_put(pa_sink* s) {
415 pa_sink_assert_ref(s);
416
417 pa_assert(s->state == PA_SINK_INIT);
418
419 /* The following fields must be initialized properly when calling _put() */
420 pa_assert(s->asyncmsgq);
421 pa_assert(s->rtpoll);
422 pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
423
424 /* Generally, flags should be initialized via pa_sink_new(). As a
425 * special exception we allow volume related flags to be set
426 * between _new() and _put(). */
427
428 if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
429 s->flags |= PA_SINK_DECIBEL_VOLUME;
430
431 if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
432 s->flags |= PA_SINK_FLAT_VOLUME;
433
434 s->thread_info.soft_volume = s->soft_volume;
435 s->thread_info.soft_muted = s->muted;
436
437 pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
438 pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
439 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->fixed_latency != 0));
440 pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
441 pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
442
443 pa_assert(s->monitor_source->fixed_latency == s->fixed_latency);
444 pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
445 pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
446
447 pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
448
449 pa_source_put(s->monitor_source);
450
451 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
452 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
453 }
454
455 /* Called from main context */
456 void pa_sink_unlink(pa_sink* s) {
457 pa_bool_t linked;
458 pa_sink_input *i, *j = NULL;
459
460 pa_assert(s);
461
462 /* Please note that pa_sink_unlink() does more than simply
463 * reversing pa_sink_put(). It also undoes the registrations
464 * already done in pa_sink_new()! */
465
466 /* All operations here shall be idempotent, i.e. pa_sink_unlink()
467 * may be called multiple times on the same sink without bad
468 * effects. */
469
470 linked = PA_SINK_IS_LINKED(s->state);
471
472 if (linked)
473 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
474
475 if (s->state != PA_SINK_UNLINKED)
476 pa_namereg_unregister(s->core, s->name);
477 pa_idxset_remove_by_data(s->core->sinks, s, NULL);
478
479 if (s->card)
480 pa_idxset_remove_by_data(s->card->sinks, s, NULL);
481
482 while ((i = pa_idxset_first(s->inputs, NULL))) {
483 pa_assert(i != j);
484 pa_sink_input_kill(i);
485 j = i;
486 }
487
488 if (linked)
489 sink_set_state(s, PA_SINK_UNLINKED);
490 else
491 s->state = PA_SINK_UNLINKED;
492
493 reset_callbacks(s);
494
495 if (s->monitor_source)
496 pa_source_unlink(s->monitor_source);
497
498 if (linked) {
499 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
500 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
501 }
502 }
503
504 /* Called from main context */
505 static void sink_free(pa_object *o) {
506 pa_sink *s = PA_SINK(o);
507 pa_sink_input *i;
508
509 pa_assert(s);
510 pa_assert(pa_sink_refcnt(s) == 0);
511
512 if (PA_SINK_IS_LINKED(s->state))
513 pa_sink_unlink(s);
514
515 pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
516
517 if (s->monitor_source) {
518 pa_source_unref(s->monitor_source);
519 s->monitor_source = NULL;
520 }
521
522 pa_idxset_free(s->inputs, NULL, NULL);
523
524 while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
525 pa_sink_input_unref(i);
526
527 pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
528
529 if (s->silence.memblock)
530 pa_memblock_unref(s->silence.memblock);
531
532 pa_xfree(s->name);
533 pa_xfree(s->driver);
534
535 if (s->proplist)
536 pa_proplist_free(s->proplist);
537
538 if (s->ports) {
539 pa_device_port *p;
540
541 while ((p = pa_hashmap_steal_first(s->ports)))
542 pa_device_port_free(p);
543
544 pa_hashmap_free(s->ports, NULL, NULL);
545 }
546
547 pa_xfree(s);
548 }
549
550 /* Called from main context */
551 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
552 pa_sink_assert_ref(s);
553
554 s->asyncmsgq = q;
555
556 if (s->monitor_source)
557 pa_source_set_asyncmsgq(s->monitor_source, q);
558 }
559
560 /* Called from main context */
561 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
562 pa_sink_assert_ref(s);
563
564 s->rtpoll = p;
565
566 if (s->monitor_source)
567 pa_source_set_rtpoll(s->monitor_source, p);
568 }
569
570 /* Called from main context */
571 int pa_sink_update_status(pa_sink*s) {
572 pa_sink_assert_ref(s);
573 pa_assert(PA_SINK_IS_LINKED(s->state));
574
575 if (s->state == PA_SINK_SUSPENDED)
576 return 0;
577
578 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
579 }
580
581 /* Called from main context */
582 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
583 pa_sink_assert_ref(s);
584 pa_assert(PA_SINK_IS_LINKED(s->state));
585 pa_assert(cause != 0);
586
587 if (suspend) {
588 s->suspend_cause |= cause;
589 s->monitor_source->suspend_cause |= cause;
590 } else {
591 s->suspend_cause &= ~cause;
592 s->monitor_source->suspend_cause &= ~cause;
593 }
594
595 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
596 return 0;
597
598 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
599
600 if (s->suspend_cause)
601 return sink_set_state(s, PA_SINK_SUSPENDED);
602 else
603 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
604 }
605
606 /* Called from main context */
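/* Moving all inputs away is a three step protocol: _move_all_start()
 * detaches the inputs into a queue, which is then handed to either
 * _move_all_finish() (attach to the new sink, killing inputs whose move
 * fails) or _move_all_fail() (fire the SINK_INPUT_MOVE_FAIL hook for each
 * stream and kill those the hook did not take care of). */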
607 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
608 pa_sink_input *i, *n;
609 uint32_t idx;
610
611 pa_sink_assert_ref(s);
612 pa_assert(PA_SINK_IS_LINKED(s->state));
613
614 if (!q)
615 q = pa_queue_new();
616
617 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
618 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
619
620 pa_sink_input_ref(i);
621
622 if (pa_sink_input_start_move(i) >= 0)
623 pa_queue_push(q, i);
624 else
625 pa_sink_input_unref(i);
626 }
627
628 return q;
629 }
630
631 /* Called from main context */
632 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
633 pa_sink_input *i;
634
635 pa_sink_assert_ref(s);
636 pa_assert(PA_SINK_IS_LINKED(s->state));
637 pa_assert(q);
638
639 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
640 if (pa_sink_input_finish_move(i, s, save) < 0)
641 pa_sink_input_kill(i);
642
643 pa_sink_input_unref(i);
644 }
645
646 pa_queue_free(q, NULL, NULL);
647 }
648
649 /* Called from main context */
650 void pa_sink_move_all_fail(pa_queue *q) {
651 pa_sink_input *i;
652 pa_assert(q);
653
654 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
655 if (pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_MOVE_FAIL], i) == PA_HOOK_OK) {
656 pa_sink_input_kill(i);
657 pa_sink_input_unref(i);
658 }
659 }
660
661 pa_queue_free(q, NULL, NULL);
662 }
663
664 /* Called from IO thread context */
665 void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
666 pa_sink_input *i;
667 void *state = NULL;
668 pa_sink_assert_ref(s);
669 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
670
671     /* If nobody requested a rewind and this is not actually a real
672      * rewind, we can short-cut it here */
673 if (!s->thread_info.rewind_requested && nbytes <= 0)
674 return;
675
676 s->thread_info.rewind_nbytes = 0;
677 s->thread_info.rewind_requested = FALSE;
678
679 if (s->thread_info.state == PA_SINK_SUSPENDED)
680 return;
681
682 if (nbytes > 0)
683 pa_log_debug("Processing rewind...");
684
685 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
686 pa_sink_input_assert_ref(i);
687 pa_sink_input_process_rewind(i, nbytes);
688 }
689
690 if (nbytes > 0)
691 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
692 pa_source_process_rewind(s->monitor_source, nbytes);
693 }
694
695 /* Called from IO thread context */
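/* Peek one chunk from every connected input (up to maxinfo). *length is
 * clamped to the shortest chunk seen so that all inputs can be mixed over
 * the same span; chunks that are pure silence are skipped here and merely
 * dropped later in inputs_drop(). Returns the number of info[] entries
 * filled, each holding a reference to its sink input. */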
696 static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
697 pa_sink_input *i;
698 unsigned n = 0;
699 void *state = NULL;
700 size_t mixlength = *length;
701
702 pa_sink_assert_ref(s);
703 pa_assert(info);
704
705 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
706 pa_sink_input_assert_ref(i);
707
708 pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
709
710 if (mixlength == 0 || info->chunk.length < mixlength)
711 mixlength = info->chunk.length;
712
713 if (pa_memblock_is_silence(info->chunk.memblock)) {
714 pa_memblock_unref(info->chunk.memblock);
715 continue;
716 }
717
718 info->userdata = pa_sink_input_ref(i);
719
720 pa_assert(info->chunk.memblock);
721 pa_assert(info->chunk.length > 0);
722
723 info++;
724 n++;
725 maxinfo--;
726 }
727
728 if (mixlength > 0)
729 *length = mixlength;
730
731 return n;
732 }
733
734 /* Called from IO thread context */
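/* Companion to fill_mix_info(): advances every input by the amount that
 * was rendered, feeds per-input data (or silence) to any direct outputs
 * on the monitor source, posts the mixed result to the monitor source and
 * releases the references and memblocks held in info[]. */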
735 static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
736 pa_sink_input *i;
737 void *state = NULL;
738 unsigned p = 0;
739 unsigned n_unreffed = 0;
740
741 pa_sink_assert_ref(s);
742 pa_assert(result);
743 pa_assert(result->memblock);
744 pa_assert(result->length > 0);
745
746 /* We optimize for the case where the order of the inputs has not changed */
747
748 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
749 unsigned j;
750 pa_mix_info* m = NULL;
751
752 pa_sink_input_assert_ref(i);
753
754         /* Let's try to find the matching entry in the pa_mix_info array */
755 for (j = 0; j < n; j ++) {
756
757 if (info[p].userdata == i) {
758 m = info + p;
759 break;
760 }
761
762 p++;
763 if (p >= n)
764 p = 0;
765 }
766
767 /* Drop read data */
768 pa_sink_input_drop(i, result->length);
769
770 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
771
772 if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
773 void *ostate = NULL;
774 pa_source_output *o;
775 pa_memchunk c;
776
777 if (m && m->chunk.memblock) {
778 c = m->chunk;
779 pa_memblock_ref(c.memblock);
780 pa_assert(result->length <= c.length);
781 c.length = result->length;
782
783 pa_memchunk_make_writable(&c, 0);
784 pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
785 } else {
786 c = s->silence;
787 pa_memblock_ref(c.memblock);
788 pa_assert(result->length <= c.length);
789 c.length = result->length;
790 }
791
792 while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
793 pa_source_output_assert_ref(o);
794 pa_assert(o->direct_on_input == i);
795 pa_source_post_direct(s->monitor_source, o, &c);
796 }
797
798 pa_memblock_unref(c.memblock);
799 }
800 }
801
802 if (m) {
803 if (m->chunk.memblock)
804 pa_memblock_unref(m->chunk.memblock);
805 pa_memchunk_reset(&m->chunk);
806
807 pa_sink_input_unref(m->userdata);
808 m->userdata = NULL;
809
810 n_unreffed += 1;
811 }
812 }
813
814 /* Now drop references to entries that are included in the
815 * pa_mix_info array but don't exist anymore */
816
817 if (n_unreffed < n) {
818 for (; n > 0; info++, n--) {
819 if (info->userdata)
820 pa_sink_input_unref(info->userdata);
821 if (info->chunk.memblock)
822 pa_memblock_unref(info->chunk.memblock);
823 }
824 }
825
826 if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
827 pa_source_post(s->monitor_source, result);
828 }
829
830 /* Called from IO thread context */
831 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
832 pa_mix_info info[MAX_MIX_CHANNELS];
833 unsigned n;
834 size_t block_size_max;
835
836 pa_sink_assert_ref(s);
837 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
838 pa_assert(pa_frame_aligned(length, &s->sample_spec));
839 pa_assert(result);
840
841 pa_sink_ref(s);
842
843 pa_assert(!s->thread_info.rewind_requested);
844 pa_assert(s->thread_info.rewind_nbytes == 0);
845
846 if (s->thread_info.state == PA_SINK_SUSPENDED) {
847 result->memblock = pa_memblock_ref(s->silence.memblock);
848 result->index = s->silence.index;
849 result->length = PA_MIN(s->silence.length, length);
                pa_sink_unref(s); /* balance the pa_sink_ref() above before the early return */
850             return;
851 }
852
853 if (length <= 0)
854 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
855
856 block_size_max = pa_mempool_block_size_max(s->core->mempool);
857 if (length > block_size_max)
858 length = pa_frame_align(block_size_max, &s->sample_spec);
859
860 pa_assert(length > 0);
861
862 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
863
864 if (n == 0) {
865
866 *result = s->silence;
867 pa_memblock_ref(result->memblock);
868
869 if (result->length > length)
870 result->length = length;
871
872 } else if (n == 1) {
873 pa_cvolume volume;
874
875 *result = info[0].chunk;
876 pa_memblock_ref(result->memblock);
877
878 if (result->length > length)
879 result->length = length;
880
881 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
882
883 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
884 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
885 pa_memblock_unref(result->memblock);
886 pa_silence_memchunk_get(&s->core->silence_cache,
887 s->core->mempool,
888 result,
889 &s->sample_spec,
890 result->length);
891 } else {
892 pa_memchunk_make_writable(result, 0);
893 pa_volume_memchunk(result, &s->sample_spec, &volume);
894 }
895 }
896 } else {
897 void *ptr;
898 result->memblock = pa_memblock_new(s->core->mempool, length);
899
900 ptr = pa_memblock_acquire(result->memblock);
901 result->length = pa_mix(info, n,
902 ptr, length,
903 &s->sample_spec,
904 &s->thread_info.soft_volume,
905 s->thread_info.soft_muted);
906 pa_memblock_release(result->memblock);
907
908 result->index = 0;
909 }
910
911 inputs_drop(s, info, n, result);
912
913 pa_sink_unref(s);
914 }
915
916 /* Called from IO thread context */
917 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
918 pa_mix_info info[MAX_MIX_CHANNELS];
919 unsigned n;
920 size_t length, block_size_max;
921
922 pa_sink_assert_ref(s);
923 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
924 pa_assert(target);
925 pa_assert(target->memblock);
926 pa_assert(target->length > 0);
927 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
928
929 pa_sink_ref(s);
930
931 pa_assert(!s->thread_info.rewind_requested);
932 pa_assert(s->thread_info.rewind_nbytes == 0);
933
934 if (s->thread_info.state == PA_SINK_SUSPENDED) {
935 pa_silence_memchunk(target, &s->sample_spec);
                pa_sink_unref(s); /* balance the pa_sink_ref() above before the early return */
936             return;
937 }
938
939 length = target->length;
940 block_size_max = pa_mempool_block_size_max(s->core->mempool);
941 if (length > block_size_max)
942 length = pa_frame_align(block_size_max, &s->sample_spec);
943
944 pa_assert(length > 0);
945
946 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
947
948 if (n == 0) {
949 if (target->length > length)
950 target->length = length;
951
952 pa_silence_memchunk(target, &s->sample_spec);
953 } else if (n == 1) {
954 pa_cvolume volume;
955
956 if (target->length > length)
957 target->length = length;
958
959 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
960
961 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
962 pa_silence_memchunk(target, &s->sample_spec);
963 else {
964 pa_memchunk vchunk;
965
966 vchunk = info[0].chunk;
967 pa_memblock_ref(vchunk.memblock);
968
969 if (vchunk.length > length)
970 vchunk.length = length;
971
972 if (!pa_cvolume_is_norm(&volume)) {
973 pa_memchunk_make_writable(&vchunk, 0);
974 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
975 }
976
977 pa_memchunk_memcpy(target, &vchunk);
978 pa_memblock_unref(vchunk.memblock);
979 }
980
981 } else {
982 void *ptr;
983
984 ptr = pa_memblock_acquire(target->memblock);
985
986 target->length = pa_mix(info, n,
987 (uint8_t*) ptr + target->index, length,
988 &s->sample_spec,
989 &s->thread_info.soft_volume,
990 s->thread_info.soft_muted);
991
992 pa_memblock_release(target->memblock);
993 }
994
995 inputs_drop(s, info, n, target);
996
997 pa_sink_unref(s);
998 }
999
1000 /* Called from IO thread context */
1001 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1002 pa_memchunk chunk;
1003 size_t l, d;
1004
1005 pa_sink_assert_ref(s);
1006 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1007 pa_assert(target);
1008 pa_assert(target->memblock);
1009 pa_assert(target->length > 0);
1010 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1011
1012 pa_sink_ref(s);
1013
1014 pa_assert(!s->thread_info.rewind_requested);
1015 pa_assert(s->thread_info.rewind_nbytes == 0);
1016
1017 l = target->length;
1018 d = 0;
1019 while (l > 0) {
1020 chunk = *target;
1021 chunk.index += d;
1022 chunk.length -= d;
1023
1024 pa_sink_render_into(s, &chunk);
1025
1026 d += chunk.length;
1027 l -= chunk.length;
1028 }
1029
1030 pa_sink_unref(s);
1031 }
1032
1033 /* Called from IO thread context */
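/* Like pa_sink_render(), but guarantees a chunk of exactly 'length' bytes:
 * the first mixing pass may come up short, in which case the remainder is
 * rendered in place with pa_sink_render_into() until the full length has
 * been produced. */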
1034 void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
1035 pa_mix_info info[MAX_MIX_CHANNELS];
1036 size_t length1st = length;
1037 unsigned n;
1038
1039 pa_sink_assert_ref(s);
1040 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1041 pa_assert(length > 0);
1042 pa_assert(pa_frame_aligned(length, &s->sample_spec));
1043 pa_assert(result);
1044
1045 pa_sink_ref(s);
1046
1047 pa_assert(!s->thread_info.rewind_requested);
1048 pa_assert(s->thread_info.rewind_nbytes == 0);
1049
1050 pa_assert(length > 0);
1051
1052 n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);
1053
1054 if (n == 0) {
1055 pa_silence_memchunk_get(&s->core->silence_cache,
1056 s->core->mempool,
1057 result,
1058 &s->sample_spec,
1059 length1st);
1060 } else if (n == 1) {
1061 pa_cvolume volume;
1062
1063 *result = info[0].chunk;
1064 pa_memblock_ref(result->memblock);
1065
1066 if (result->length > length)
1067 result->length = length;
1068
1069 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
1070
1071 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
1072 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
1073 pa_memblock_unref(result->memblock);
1074 pa_silence_memchunk_get(&s->core->silence_cache,
1075 s->core->mempool,
1076 result,
1077 &s->sample_spec,
1078 result->length);
1079 } else {
1080 pa_memchunk_make_writable(result, length);
1081 pa_volume_memchunk(result, &s->sample_spec, &volume);
1082 }
1083 }
1084 } else {
1085 void *ptr;
1086
1087 result->index = 0;
1088 result->memblock = pa_memblock_new(s->core->mempool, length);
1089
1090 ptr = pa_memblock_acquire(result->memblock);
1091
1092 result->length = pa_mix(info, n,
1093 (uint8_t*) ptr + result->index, length1st,
1094 &s->sample_spec,
1095 &s->thread_info.soft_volume,
1096 s->thread_info.soft_muted);
1097
1098 pa_memblock_release(result->memblock);
1099 }
1100
1101 inputs_drop(s, info, n, result);
1102
1103 if (result->length < length) {
1104 pa_memchunk chunk;
1105 size_t l, d;
1106 pa_memchunk_make_writable(result, length);
1107
1108 l = length - result->length;
1109 d = result->index + result->length;
1110 while (l > 0) {
1111 chunk = *result;
1112 chunk.index = d;
1113 chunk.length = l;
1114
1115 pa_sink_render_into(s, &chunk);
1116
1117 d += chunk.length;
1118 l -= chunk.length;
1119 }
1120 result->length = length;
1121 }
1122
1123 pa_sink_unref(s);
1124 }
1125
1126 /* Called from main thread */
1127 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1128 pa_usec_t usec = 0;
1129
1130 pa_sink_assert_ref(s);
1131 pa_assert(PA_SINK_IS_LINKED(s->state));
1132
1133 /* The returned value is supposed to be in the time domain of the sound card! */
1134
1135 if (s->state == PA_SINK_SUSPENDED)
1136 return 0;
1137
1138 if (!(s->flags & PA_SINK_LATENCY))
1139 return 0;
1140
1141 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1142
1143 return usec;
1144 }
1145
1146 /* Called from IO thread */
1147 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1148 pa_usec_t usec = 0;
1149 pa_msgobject *o;
1150
1151 pa_sink_assert_ref(s);
1152 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1153
1154 /* The returned value is supposed to be in the time domain of the sound card! */
1155
1156 if (s->thread_info.state == PA_SINK_SUSPENDED)
1157 return 0;
1158
1159 if (!(s->flags & PA_SINK_LATENCY))
1160 return 0;
1161
1162 o = PA_MSGOBJECT(s);
1163
1164 /* We probably should make this a proper vtable callback instead of going through process_msg() */
1165
1166 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1167 return -1;
1168
1169 return usec;
1170 }
1171
1172 static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
1173 unsigned c;
1174
1175 pa_sink_input_assert_ref(i);
1176 pa_assert(new_volume->channels == i->sample_spec.channels);
1177
1178 /*
1179 * This basically calculates:
1180 *
1181 * i->relative_volume := i->virtual_volume / new_volume
1182 * i->soft_volume := i->relative_volume * i->volume_factor
1183 */
1184
1185 /* The new sink volume passed in here must already be remapped to
1186 * the sink input's channel map! */
1187
1188 i->soft_volume.channels = i->sample_spec.channels;
1189
1190 for (c = 0; c < i->sample_spec.channels; c++)
1191
1192 if (new_volume->values[c] <= PA_VOLUME_MUTED)
1193 /* We leave i->relative_volume untouched */
1194 i->soft_volume.values[c] = PA_VOLUME_MUTED;
1195 else {
1196 i->relative_volume[c] =
1197 pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
1198 pa_sw_volume_to_linear(new_volume->values[c]);
1199
1200 i->soft_volume.values[c] = pa_sw_volume_from_linear(
1201 i->relative_volume[c] *
1202 pa_sw_volume_to_linear(i->volume_factor.values[c]));
1203 }
1204
1205 /* Hooks have the ability to play games with i->soft_volume */
1206 pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);
1207
1208 /* We don't copy the soft_volume to the thread_info data
1209 * here. That must be done by the caller */
1210 }
1211
1212 /* Called from main thread */
1213 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1214 pa_sink_input *i;
1215 uint32_t idx;
1216
1217 pa_sink_assert_ref(s);
1218 pa_assert(new_volume);
1219 pa_assert(PA_SINK_IS_LINKED(s->state));
1220 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1221
1222 /* This is called whenever a sink input volume changes or a sink
1223 * input is added/removed and we might need to fix up the sink
1224 * volume accordingly. Please note that we don't actually update
1225      * the sink's volume here, we only return how it needs to be
1226 * updated. The caller should then call pa_sink_set_volume().*/
1227
1228 if (pa_idxset_isempty(s->inputs)) {
1229 /* In the special case that we have no sink input we leave the
1230 * volume unmodified. */
1231 *new_volume = s->reference_volume;
1232 return;
1233 }
1234
1235 pa_cvolume_mute(new_volume, s->channel_map.channels);
1236
1237 /* First let's determine the new maximum volume of all inputs
1238 * connected to this sink */
1239 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1240 unsigned c;
1241 pa_cvolume remapped_volume;
1242
1243 remapped_volume = i->virtual_volume;
1244 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1245
1246 for (c = 0; c < new_volume->channels; c++)
1247 if (remapped_volume.values[c] > new_volume->values[c])
1248 new_volume->values[c] = remapped_volume.values[c];
1249 }
1250
1251 /* Then, let's update the soft volumes of all inputs connected
1252 * to this sink */
1253 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1254 pa_cvolume remapped_new_volume;
1255
1256 remapped_new_volume = *new_volume;
1257 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1258 compute_new_soft_volume(i, &remapped_new_volume);
1259
1260 /* We don't copy soft_volume to the thread_info data here
1261 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1262          * want the update to happen atomically with the sink volume
1263 * update, hence we do it within the pa_sink_set_volume() call
1264 * below */
1265 }
1266 }
1267
1268 /* Called from main thread */
1269 void pa_sink_propagate_flat_volume(pa_sink *s) {
1270 pa_sink_input *i;
1271 uint32_t idx;
1272
1273 pa_sink_assert_ref(s);
1274 pa_assert(PA_SINK_IS_LINKED(s->state));
1275 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1276
1277     /* This is called whenever the sink volume changes for a reason other
1278      * than a sink input volume change. We need to fix up the
1279 * sink input volumes accordingly */
1280
1281 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1282 pa_cvolume sink_volume, new_virtual_volume;
1283 unsigned c;
1284
1285 /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */
1286
1287 sink_volume = s->virtual_volume;
1288 pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);
1289
1290 for (c = 0; c < i->sample_spec.channels; c++)
1291 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1292 i->relative_volume[c] *
1293 pa_sw_volume_to_linear(sink_volume.values[c]));
1294
1295 new_virtual_volume.channels = i->sample_spec.channels;
1296
1297 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1298 i->virtual_volume = new_virtual_volume;
1299
1300 /* Hmm, the soft volume might no longer actually match
1301 * what has been chosen as new virtual volume here,
1302 * especially when the old volume was
1303 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1304 * volumes here. */
1305 compute_new_soft_volume(i, &sink_volume);
1306
1307 /* The virtual volume changed, let's tell people so */
1308 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1309 }
1310 }
1311
1312 /* If the soft_volume of any of the sink inputs got changed, let's
1313 * make sure the thread copies are synced up. */
1314 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
1315 }
1316
1317 /* Called from main thread */
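/* Sets the sink's virtual volume. 'propagate' pushes the change down to
 * the inputs in flat volume mode, 'sendmsg' syncs the IO-thread copy via a
 * SET_VOLUME message, 'become_reference' also updates reference_volume,
 * and 'save' marks the new volume as worth saving (e.g. by the restore
 * modules). */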
1318 void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference, pa_bool_t save) {
1319 pa_bool_t virtual_volume_changed;
1320
1321 pa_sink_assert_ref(s);
1322 pa_assert(PA_SINK_IS_LINKED(s->state));
1323 pa_assert(volume);
1324 pa_assert(pa_cvolume_valid(volume));
1325 pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));
1326
1327 virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
1328 s->virtual_volume = *volume;
1329 s->save_volume = (!virtual_volume_changed && s->save_volume) || save;
1330
1331 if (become_reference)
1332 s->reference_volume = s->virtual_volume;
1333
1334 /* Propagate this volume change back to the inputs */
1335 if (virtual_volume_changed)
1336 if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
1337 pa_sink_propagate_flat_volume(s);
1338
1339 if (s->set_volume) {
1340 /* If we have a function set_volume(), then we do not apply a
1341 * soft volume by default. However, set_volume() is free to
1342 * apply one to s->soft_volume */
1343
1344 pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
1345 s->set_volume(s);
1346
1347 } else
1348 /* If we have no function set_volume(), then the soft volume
1349 * becomes the virtual volume */
1350 s->soft_volume = s->virtual_volume;
1351
1352 /* This tells the sink that soft and/or virtual volume changed */
1353 if (sendmsg)
1354 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1355
1356 if (virtual_volume_changed)
1357 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1358 }
1359
1360 /* Called from main thread. Only to be called by sink implementor */
1361 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1362 pa_sink_assert_ref(s);
1363 pa_assert(volume);
1364
1365 s->soft_volume = *volume;
1366
1367 if (PA_SINK_IS_LINKED(s->state))
1368 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1369 else
1370 s->thread_info.soft_volume = *volume;
1371 }
1372
1373 /* Called from main thread */
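/* Returns either the reference volume or the current virtual volume. With
 * force_refresh (or if refresh_volume is set) the value is first re-read
 * from the driver via get_volume() and the GET_VOLUME message; any change
 * becomes the new reference volume, is propagated to flat-volume inputs
 * and is announced to subscribers. */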
1374 const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
1375 pa_sink_assert_ref(s);
1376
1377 if (s->refresh_volume || force_refresh) {
1378 struct pa_cvolume old_virtual_volume = s->virtual_volume;
1379
1380 if (s->get_volume)
1381 s->get_volume(s);
1382
1383 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
1384
1385 if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {
1386
1387 s->reference_volume = s->virtual_volume;
1388
1389 if (s->flags & PA_SINK_FLAT_VOLUME)
1390 pa_sink_propagate_flat_volume(s);
1391
1392 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1393 }
1394 }
1395
1396 return reference ? &s->reference_volume : &s->virtual_volume;
1397 }
1398
1399 /* Called from main thread */
1400 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume, pa_bool_t save) {
1401 pa_sink_assert_ref(s);
1402
1403 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1404 if (pa_cvolume_equal(&s->virtual_volume, new_volume)) {
1405 s->save_volume = s->save_volume || save;
1406 return;
1407 }
1408
1409 s->reference_volume = s->virtual_volume = *new_volume;
1410 s->save_volume = save;
1411
1412 if (s->flags & PA_SINK_FLAT_VOLUME)
1413 pa_sink_propagate_flat_volume(s);
1414
1415 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1416 }
1417
1418 /* Called from main thread */
1419 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1420 pa_bool_t old_muted;
1421
1422 pa_sink_assert_ref(s);
1423 pa_assert(PA_SINK_IS_LINKED(s->state));
1424
1425 old_muted = s->muted;
1426 s->muted = mute;
1427 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1428
1429 if (s->set_mute)
1430 s->set_mute(s);
1431
1432 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1433
1434 if (old_muted != s->muted)
1435 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1436 }
1437
1438 /* Called from main thread */
1439 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1440
1441 pa_sink_assert_ref(s);
1442
1443 if (s->refresh_muted || force_refresh) {
1444 pa_bool_t old_muted = s->muted;
1445
1446 if (s->get_mute)
1447 s->get_mute(s);
1448
1449 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1450
1451 if (old_muted != s->muted) {
1452 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1453
1454 /* Make sure the soft mute status stays in sync */
1455 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1456 }
1457 }
1458
1459 return s->muted;
1460 }
1461
1462 /* Called from main thread */
1463 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted, pa_bool_t save) {
1464 pa_sink_assert_ref(s);
1465
1466     /* The sink implementor may call this if the mute status changed to make sure everyone is notified */
1467
1468 if (s->muted == new_muted) {
1469 s->save_muted = s->save_muted || save;
1470 return;
1471 }
1472
1473 s->muted = new_muted;
1474 s->save_muted = save;
1475
1476 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1477 }
1478
1479 /* Called from main thread */
1480 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1481 pa_sink_assert_ref(s);
1482
1483 if (p)
1484 pa_proplist_update(s->proplist, mode, p);
1485
1486 if (PA_SINK_IS_LINKED(s->state)) {
1487 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1488 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1489 }
1490
1491 return TRUE;
1492 }
1493
1494 /* Called from main thread */
1495 void pa_sink_set_description(pa_sink *s, const char *description) {
1496 const char *old;
1497 pa_sink_assert_ref(s);
1498
1499 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1500 return;
1501
1502 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1503
1504 if (old && description && !strcmp(old, description))
1505 return;
1506
1507 if (description)
1508 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1509 else
1510 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1511
1512 if (s->monitor_source) {
1513 char *n;
1514
1515 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1516 pa_source_set_description(s->monitor_source, n);
1517 pa_xfree(n);
1518 }
1519
1520 if (PA_SINK_IS_LINKED(s->state)) {
1521 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1522 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1523 }
1524 }
1525
1526 /* Called from main thread */
1527 unsigned pa_sink_linked_by(pa_sink *s) {
1528 unsigned ret;
1529
1530 pa_sink_assert_ref(s);
1531 pa_assert(PA_SINK_IS_LINKED(s->state));
1532
1533 ret = pa_idxset_size(s->inputs);
1534
1535 /* We add in the number of streams connected to us here. Please
1536      * note the asymmetry to pa_sink_used_by()! */
1537
1538 if (s->monitor_source)
1539 ret += pa_source_linked_by(s->monitor_source);
1540
1541 return ret;
1542 }
1543
1544 /* Called from main thread */
1545 unsigned pa_sink_used_by(pa_sink *s) {
1546 unsigned ret;
1547
1548 pa_sink_assert_ref(s);
1549 pa_assert(PA_SINK_IS_LINKED(s->state));
1550
1551 ret = pa_idxset_size(s->inputs);
1552 pa_assert(ret >= s->n_corked);
1553
1554 /* Streams connected to our monitor source do not matter for
1555      * pa_sink_used_by()! */
1556
1557 return ret - s->n_corked;
1558 }
1559
1560 /* Called from main thread */
1561 unsigned pa_sink_check_suspend(pa_sink *s) {
1562 unsigned ret;
1563 pa_sink_input *i;
1564 uint32_t idx;
1565
1566 pa_sink_assert_ref(s);
1567
1568 if (!PA_SINK_IS_LINKED(s->state))
1569 return 0;
1570
1571 ret = 0;
1572
1573 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1574 pa_sink_input_state_t st;
1575
1576 st = pa_sink_input_get_state(i);
1577 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1578
1579 if (st == PA_SINK_INPUT_CORKED)
1580 continue;
1581
1582 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1583 continue;
1584
1585 ret ++;
1586 }
1587
1588 if (s->monitor_source)
1589 ret += pa_source_check_suspend(s->monitor_source);
1590
1591 return ret;
1592 }
1593
1594 /* Called from the IO thread */
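/* Copies each input's freshly computed soft_volume into its thread_info
 * copy and requests a rewind so the change becomes audible promptly. When
 * a volume ramp is pending for an input (before_ramping_v set, part of the
 * volume ramping feature this patch adds), the new value is parked in
 * thread_info.future_soft_volume instead of being applied immediately,
 * presumably so the ramp code can fade towards it. */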
1595 static void sync_input_volumes_within_thread(pa_sink *s) {
1596 pa_sink_input *i;
1597 void *state = NULL;
1598
1599 pa_sink_assert_ref(s);
1600
1601 while ((i = PA_SINK_INPUT(pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))) {
1602 if (pa_atomic_load(&i->before_ramping_v))
1603 i->thread_info.future_soft_volume = i->soft_volume;
1604
1605 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1606 continue;
1607
1608 if (!pa_atomic_load(&i->before_ramping_v))
1609 i->thread_info.soft_volume = i->soft_volume;
1610 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1611 }
1612 }
1613
1614 /* Called from IO thread, except when it is not */
1615 int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1616 pa_sink *s = PA_SINK(o);
1617 pa_sink_assert_ref(s);
1618
1619 switch ((pa_sink_message_t) code) {
1620
1621 case PA_SINK_MESSAGE_ADD_INPUT: {
1622 pa_sink_input *i = PA_SINK_INPUT(userdata);
1623
1624 /* If you change anything here, make sure to change the
1625 * sink input handling a few lines down at
1626 * PA_SINK_MESSAGE_FINISH_MOVE, too. */
1627
1628 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1629
1630 /* Since the caller sleeps in pa_sink_input_put(), we can
1631 * safely access data outside of thread_info even though
1632 * it is mutable */
1633
1634 if ((i->thread_info.sync_prev = i->sync_prev)) {
1635 pa_assert(i->sink == i->thread_info.sync_prev->sink);
1636 pa_assert(i->sync_prev->sync_next == i);
1637 i->thread_info.sync_prev->thread_info.sync_next = i;
1638 }
1639
1640 if ((i->thread_info.sync_next = i->sync_next)) {
1641 pa_assert(i->sink == i->thread_info.sync_next->sink);
1642 pa_assert(i->sync_next->sync_prev == i);
1643 i->thread_info.sync_next->thread_info.sync_prev = i;
1644 }
1645
1646 pa_assert(!i->thread_info.attached);
1647 i->thread_info.attached = TRUE;
1648
1649 if (i->attach)
1650 i->attach(i);
1651
1652 pa_sink_input_set_state_within_thread(i, i->state);
1653
1654 /* The requested latency of the sink input needs to be
1655 * fixed up and then configured on the sink */
1656
1657 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1658 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1659
1660 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1661 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1662
1663 /* We don't rewind here automatically. This is left to the
1664 * sink input implementor because some sink inputs need a
1665 * slow start, i.e. need some time to buffer client
1666 * samples before beginning streaming. */
1667
1668 /* In flat volume mode we need to update the volume as
1669 * well */
1670 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1671 }
1672
1673 case PA_SINK_MESSAGE_REMOVE_INPUT: {
1674 pa_sink_input *i = PA_SINK_INPUT(userdata);
1675
1676 /* If you change anything here, make sure to change the
1677 * sink input handling a few lines down at
1678              * PA_SINK_MESSAGE_START_MOVE, too. */
1679
1680 if (i->detach)
1681 i->detach(i);
1682
1683 pa_sink_input_set_state_within_thread(i, i->state);
1684
1685 pa_assert(i->thread_info.attached);
1686 i->thread_info.attached = FALSE;
1687
1688 /* Since the caller sleeps in pa_sink_input_unlink(),
1689 * we can safely access data outside of thread_info even
1690 * though it is mutable */
1691
1692 pa_assert(!i->sync_prev);
1693 pa_assert(!i->sync_next);
1694
1695 if (i->thread_info.sync_prev) {
1696 i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
1697 i->thread_info.sync_prev = NULL;
1698 }
1699
1700 if (i->thread_info.sync_next) {
1701 i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
1702 i->thread_info.sync_next = NULL;
1703 }
1704
1705 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1706 pa_sink_input_unref(i);
1707
1708 pa_sink_invalidate_requested_latency(s);
1709 pa_sink_request_rewind(s, (size_t) -1);
1710
1711 /* In flat volume mode we need to update the volume as
1712 * well */
1713 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1714 }
1715
1716 case PA_SINK_MESSAGE_START_MOVE: {
1717 pa_sink_input *i = PA_SINK_INPUT(userdata);
1718
1719 /* We don't support moving synchronized streams. */
1720 pa_assert(!i->sync_prev);
1721 pa_assert(!i->sync_next);
1722 pa_assert(!i->thread_info.sync_next);
1723 pa_assert(!i->thread_info.sync_prev);
1724
1725 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1726 pa_usec_t usec = 0;
1727 size_t sink_nbytes, total_nbytes;
1728
1729 /* Get the latency of the sink */
1730 if (!(s->flags & PA_SINK_LATENCY) ||
1731 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1732 usec = 0;
1733
1734 sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1735 total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
1736
1737 if (total_nbytes > 0) {
1738 i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
1739 i->thread_info.rewrite_flush = TRUE;
1740 pa_sink_input_process_rewind(i, sink_nbytes);
1741 }
1742 }
1743
1744 if (i->detach)
1745 i->detach(i);
1746
1747 pa_assert(i->thread_info.attached);
1748 i->thread_info.attached = FALSE;
1749
1750 /* Let's remove the sink input ...*/
1751 if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
1752 pa_sink_input_unref(i);
1753
1754 pa_sink_invalidate_requested_latency(s);
1755
1756 pa_log_debug("Requesting rewind due to started move");
1757 pa_sink_request_rewind(s, (size_t) -1);
1758
1759 /* In flat volume mode we need to update the volume as
1760 * well */
1761 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1762 }
1763
1764 case PA_SINK_MESSAGE_FINISH_MOVE: {
1765 pa_sink_input *i = PA_SINK_INPUT(userdata);
1766
1767 /* We don't support moving synchronized streams. */
1768 pa_assert(!i->sync_prev);
1769 pa_assert(!i->sync_next);
1770 pa_assert(!i->thread_info.sync_next);
1771 pa_assert(!i->thread_info.sync_prev);
1772
1773 pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
1774
1775 pa_assert(!i->thread_info.attached);
1776 i->thread_info.attached = TRUE;
1777
1778 if (i->attach)
1779 i->attach(i);
1780
1781 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
1782 pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
1783
1784 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
1785 pa_sink_input_update_max_request(i, s->thread_info.max_request);
1786
1787 if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
1788 pa_usec_t usec = 0;
1789 size_t nbytes;
1790
1791 /* Get the latency of the sink */
1792 if (!(s->flags & PA_SINK_LATENCY) ||
1793 PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1794 usec = 0;
1795
1796 nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
1797
1798 if (nbytes > 0)
1799 pa_sink_input_drop(i, nbytes);
1800
1801 pa_log_debug("Requesting rewind due to finished move");
1802 pa_sink_request_rewind(s, nbytes);
1803 }
1804
1805 /* In flat volume mode we need to update the volume as
1806 * well */
1807 return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
1808 }
1809
1810 case PA_SINK_MESSAGE_SET_VOLUME:
1811
1812 if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
1813 s->thread_info.soft_volume = s->soft_volume;
1814 pa_sink_request_rewind(s, (size_t) -1);
1815 }
1816
1817 if (!(s->flags & PA_SINK_FLAT_VOLUME))
1818 return 0;
1819
1820 /* Fall through ... */
1821
1822 case PA_SINK_MESSAGE_SYNC_VOLUMES:
1823 sync_input_volumes_within_thread(s);
1824 return 0;
1825
1826 case PA_SINK_MESSAGE_GET_VOLUME:
1827 return 0;
1828
1829 case PA_SINK_MESSAGE_SET_MUTE:
1830
1831 if (s->thread_info.soft_muted != s->muted) {
1832 s->thread_info.soft_muted = s->muted;
1833 pa_sink_request_rewind(s, (size_t) -1);
1834 }
1835
1836 return 0;
1837
1838 case PA_SINK_MESSAGE_GET_MUTE:
1839 return 0;
1840
1841 case PA_SINK_MESSAGE_SET_STATE: {
1842
1843 pa_bool_t suspend_change =
1844 (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
1845 (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
1846
1847 s->thread_info.state = PA_PTR_TO_UINT(userdata);
1848
1849 if (s->thread_info.state == PA_SINK_SUSPENDED) {
1850 s->thread_info.rewind_nbytes = 0;
1851 s->thread_info.rewind_requested = FALSE;
1852 }
1853
1854 if (suspend_change) {
1855 pa_sink_input *i;
1856 void *state = NULL;
1857
1858 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1859 if (i->suspend_within_thread)
1860 i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
1861 }
1862
1863 return 0;
1864 }
1865
1866 case PA_SINK_MESSAGE_DETACH:
1867
1868 /* Detach all streams */
1869 pa_sink_detach_within_thread(s);
1870 return 0;
1871
1872 case PA_SINK_MESSAGE_ATTACH:
1873
1874 /* Reattach all streams */
1875 pa_sink_attach_within_thread(s);
1876 return 0;
1877
1878 case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
1879
1880 pa_usec_t *usec = userdata;
1881 *usec = pa_sink_get_requested_latency_within_thread(s);
1882
1883 if (*usec == (pa_usec_t) -1)
1884 *usec = s->thread_info.max_latency;
1885
1886 return 0;
1887 }
1888
1889 case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
1890 pa_usec_t *r = userdata;
1891
1892 pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
1893
1894 return 0;
1895 }
1896
1897 case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
1898 pa_usec_t *r = userdata;
1899
1900 r[0] = s->thread_info.min_latency;
1901 r[1] = s->thread_info.max_latency;
1902
1903 return 0;
1904 }
1905
1906 case PA_SINK_MESSAGE_GET_MAX_REWIND:
1907
1908 *((size_t*) userdata) = s->thread_info.max_rewind;
1909 return 0;
1910
1911 case PA_SINK_MESSAGE_GET_MAX_REQUEST:
1912
1913 *((size_t*) userdata) = s->thread_info.max_request;
1914 return 0;
1915
1916 case PA_SINK_MESSAGE_SET_MAX_REWIND:
1917
1918 pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
1919 return 0;
1920
1921 case PA_SINK_MESSAGE_SET_MAX_REQUEST:
1922
1923 pa_sink_set_max_request_within_thread(s, (size_t) offset);
1924 return 0;
1925
1926 case PA_SINK_MESSAGE_GET_LATENCY:
1927 case PA_SINK_MESSAGE_MAX:
1928 ;
1929 }
1930
1931 return -1;
1932 }
1933
1934 /* Called from main thread */
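/* Suspends or resumes every sink of the core with the given cause. Iteration
 * continues even if an individual sink fails; the return value is the last
 * error from pa_sink_suspend(), or 0 if all calls succeeded. */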
1935 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
1936 pa_sink *sink;
1937 uint32_t idx;
1938 int ret = 0;
1939
1940 pa_core_assert_ref(c);
1941 pa_assert(cause != 0);
1942
1943 for (sink = PA_SINK(pa_idxset_first(c->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(c->sinks, &idx))) {
1944 int r;
1945
1946 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
1947 ret = r;
1948 }
1949
1950 return ret;
1951 }
1952
1953 /* Called from main thread; synchronously asks the IO thread to detach all streams */
1954 void pa_sink_detach(pa_sink *s) {
1955 pa_sink_assert_ref(s);
1956 pa_assert(PA_SINK_IS_LINKED(s->state));
1957
1958 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
1959 }
1960
1961 /* Called from main thread; synchronously asks the IO thread to reattach all streams */
1962 void pa_sink_attach(pa_sink *s) {
1963 pa_sink_assert_ref(s);
1964 pa_assert(PA_SINK_IS_LINKED(s->state));
1965
1966 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
1967 }
1968
1969 /* Called from IO thread */
1970 void pa_sink_detach_within_thread(pa_sink *s) {
1971 pa_sink_input *i;
1972 void *state = NULL;
1973
1974 pa_sink_assert_ref(s);
1975 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1976
1977 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1978 if (i->detach)
1979 i->detach(i);
1980
1981 if (s->monitor_source)
1982 pa_source_detach_within_thread(s->monitor_source);
1983 }
1984
1985 /* Called from IO thread */
1986 void pa_sink_attach_within_thread(pa_sink *s) {
1987 pa_sink_input *i;
1988 void *state = NULL;
1989
1990 pa_sink_assert_ref(s);
1991 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1992
1993 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
1994 if (i->attach)
1995 i->attach(i);
1996
1997 if (s->monitor_source)
1998 pa_source_attach_within_thread(s->monitor_source);
1999 }
2000
2001 /* Called from IO thread */
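/* Asks the sink implementor to rewrite part of its playback buffer. Passing
 * (size_t) -1 requests a full rewind. The request is ignored while the sink
 * is suspended, clamped to max_rewind, and merged with a pending request that
 * already covers at least as many bytes; only then is the implementor's
 * request_rewind() hook invoked. */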
2002 void pa_sink_request_rewind(pa_sink *s, size_t nbytes) {
2003 pa_sink_assert_ref(s);
2004 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2005
2006 if (s->thread_info.state == PA_SINK_SUSPENDED)
2007 return;
2008
2009 if (nbytes == (size_t) -1)
2010 nbytes = s->thread_info.max_rewind;
2011
2012 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2013
2014 if (s->thread_info.rewind_requested &&
2015 nbytes <= s->thread_info.rewind_nbytes)
2016 return;
2017
2018 s->thread_info.rewind_nbytes = nbytes;
2019 s->thread_info.rewind_requested = TRUE;
2020
2021 if (s->request_rewind)
2022 s->request_rewind(s);
2023 }
2024
2025 /* Called from IO thread */
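/* Computes the latency this sink should be configured to: for sinks without
 * PA_SINK_DYNAMIC_LATENCY this is simply the fixed latency clamped to the
 * allowed range; otherwise it is the smallest latency requested by any input
 * or by the monitor source, clamped to [min_latency, max_latency], or
 * (pa_usec_t) -1 if nothing was requested. The result is cached once the sink
 * is linked. */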
2026 pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
2027 pa_usec_t result = (pa_usec_t) -1;
2028 pa_sink_input *i;
2029 void *state = NULL;
2030 pa_usec_t monitor_latency;
2031
2032 pa_sink_assert_ref(s);
2033
2034 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2035 return PA_CLAMP(s->fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
2036
2037 if (s->thread_info.requested_latency_valid)
2038 return s->thread_info.requested_latency;
2039
2040 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2041
2042 if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
2043 (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
2044 result = i->thread_info.requested_sink_latency;
2045
2046 monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
2047
2048 if (monitor_latency != (pa_usec_t) -1 &&
2049 (result == (pa_usec_t) -1 || result > monitor_latency))
2050 result = monitor_latency;
2051
2052 if (result != (pa_usec_t) -1)
2053 result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
2054
2055 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2056 /* Only cache if properly initialized */
2057 s->thread_info.requested_latency = result;
2058 s->thread_info.requested_latency_valid = TRUE;
2059 }
2060
2061 return result;
2062 }
2063
2064 /* Called from main thread */
2065 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2066 pa_usec_t usec = 0;
2067
2068 pa_sink_assert_ref(s);
2069 pa_assert(PA_SINK_IS_LINKED(s->state));
2070
2071 if (s->state == PA_SINK_SUSPENDED)
2072 return 0;
2073
2074 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2075 return usec;
2076 }
2077
2078 /* Called from the IO thread, and from the main thread only before the IO thread has started up */
2079 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2080 pa_sink_input *i;
2081 void *state = NULL;
2082
2083 pa_sink_assert_ref(s);
2084
2085 if (max_rewind == s->thread_info.max_rewind)
2086 return;
2087
2088 s->thread_info.max_rewind = max_rewind;
2089
2090 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2091 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2092 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2093 }
2094
2095 if (s->monitor_source)
2096 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2097 }
2098
2099 /* Called from main thread */
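/* While the sink is linked, thread_info may only be modified from the IO
 * thread, so the new value is forwarded through the asyncmsgq; before the
 * sink is put it is safe to set it directly from the main thread. */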
2100 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2101 pa_sink_assert_ref(s);
2102
2103 if (PA_SINK_IS_LINKED(s->state))
2104 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2105 else
2106 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2107 }
2108
2109 /* Called from the IO thread, and from the main thread only before the IO thread has started up */
2110 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2111 void *state = NULL;
2112
2113 pa_sink_assert_ref(s);
2114
2115 if (max_request == s->thread_info.max_request)
2116 return;
2117
2118 s->thread_info.max_request = max_request;
2119
2120 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2121 pa_sink_input *i;
2122
2123 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2124 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2125 }
2126 }
2127
2128 /* Called from main thread */
2129 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2130 pa_sink_assert_ref(s);
2131
2132 if (PA_SINK_IS_LINKED(s->state))
2133 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2134 else
2135 pa_sink_set_max_request_within_thread(s, max_request);
2136 }
2137
2138 /* Called from IO thread */
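/* Drops the cached requested-latency value of a dynamic-latency sink so it is
 * recomputed on the next query, and notifies the sink implementor as well as
 * all attached inputs of the change. */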
2139 void pa_sink_invalidate_requested_latency(pa_sink *s) {
2140 pa_sink_input *i;
2141 void *state = NULL;
2142
2143 pa_sink_assert_ref(s);
2144
2145 if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
2146 return;
2147
2148 s->thread_info.requested_latency_valid = FALSE;
2149
2150 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2151
2152 if (s->update_requested_latency)
2153 s->update_requested_latency(s);
2154
2155 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2156 if (i->update_sink_requested_latency)
2157 i->update_sink_requested_latency(i);
2158 }
2159 }
2160
2161 /* Called from main thread */
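/* Restricts the latency range this sink may be configured to; 0 for either
 * bound means "no limit" beyond the hard-coded absolute limits. A driver
 * module would typically call this once at load time, e.g. with illustrative
 * values only:
 *
 *     pa_sink_set_latency_range(s, 5*PA_USEC_PER_MSEC, 2*PA_USEC_PER_SEC);
 *
 * While the sink is linked the new range is forwarded to the IO thread, which
 * in turn notifies all inputs and the monitor source. */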
2162 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2163 pa_sink_assert_ref(s);
2164
2165 /* min_latency == 0: no limit
2166 * min_latency anything else: specified limit
2167 *
2168 * Similar for max_latency */
2169
2170 if (min_latency < ABSOLUTE_MIN_LATENCY)
2171 min_latency = ABSOLUTE_MIN_LATENCY;
2172
2173 if (max_latency <= 0 ||
2174 max_latency > ABSOLUTE_MAX_LATENCY)
2175 max_latency = ABSOLUTE_MAX_LATENCY;
2176
2177 pa_assert(min_latency <= max_latency);
2178
2179 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2180 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2181 max_latency == ABSOLUTE_MAX_LATENCY) ||
2182 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2183
2184 if (PA_SINK_IS_LINKED(s->state)) {
2185 pa_usec_t r[2];
2186
2187 r[0] = min_latency;
2188 r[1] = max_latency;
2189
2190 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2191 } else
2192 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2193 }
2194
2195 /* Called from main thread */
2196 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2197 pa_sink_assert_ref(s);
2198 pa_assert(min_latency);
2199 pa_assert(max_latency);
2200
2201 if (PA_SINK_IS_LINKED(s->state)) {
2202 pa_usec_t r[2] = { 0, 0 };
2203
2204 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2205
2206 *min_latency = r[0];
2207 *max_latency = r[1];
2208 } else {
2209 *min_latency = s->thread_info.min_latency;
2210 *max_latency = s->thread_info.max_latency;
2211 }
2212 }
2213
2214 /* Called from IO thread */
2215 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2216 void *state = NULL;
2217
2218 pa_sink_assert_ref(s);
2219
2220 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2221 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2222 pa_assert(min_latency <= max_latency);
2223
2224 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2225 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2226 max_latency == ABSOLUTE_MAX_LATENCY) ||
2227 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2228
2229 s->thread_info.min_latency = min_latency;
2230 s->thread_info.max_latency = max_latency;
2231
2232 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2233 pa_sink_input *i;
2234
2235 while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
2236 if (i->update_sink_latency_range)
2237 i->update_sink_latency_range(i);
2238 }
2239
2240 pa_sink_invalidate_requested_latency(s);
2241
2242 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2243 }
2244
2245 /* Called from main thread, before the sink is put */
2246 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2247 pa_sink_assert_ref(s);
2248
2249 pa_assert(pa_sink_get_state(s) == PA_SINK_INIT);
2250
2251 if (latency < ABSOLUTE_MIN_LATENCY)
2252 latency = ABSOLUTE_MIN_LATENCY;
2253
2254 if (latency > ABSOLUTE_MAX_LATENCY)
2255 latency = ABSOLUTE_MAX_LATENCY;
2256
2257 s->fixed_latency = latency;
2258 pa_source_set_fixed_latency(s->monitor_source, latency);
2259 }
2260
2261 /* Called from main context */
2262 size_t pa_sink_get_max_rewind(pa_sink *s) {
2263 size_t r;
2264 pa_sink_assert_ref(s);
2265
2266 if (!PA_SINK_IS_LINKED(s->state))
2267 return s->thread_info.max_rewind;
2268
2269 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2270
2271 return r;
2272 }
2273
2274 /* Called from main context */
2275 size_t pa_sink_get_max_request(pa_sink *s) {
2276 size_t r;
2277 pa_sink_assert_ref(s);
2278
2279 if (!PA_SINK_IS_LINKED(s->state))
2280 return s->thread_info.max_request;
2281
2282 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2283
2284 return r;
2285 }
2286
2287 /* Called from main context */
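/* Switches the active port of the sink. Returns -PA_ERR_NOTIMPLEMENTED if the
 * sink has no set_port() callback and -PA_ERR_NOENTITY if the named port does
 * not exist or the callback rejects it. Re-selecting the already active port
 * only updates the save flag; on an actual change a SINK|CHANGE subscription
 * event is posted. */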
2288 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2289 pa_device_port *port;
2290
2291 pa_assert(s);
2292
2293 if (!s->set_port) {
2294 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2295 return -PA_ERR_NOTIMPLEMENTED;
2296 }
2297
2298 if (!s->ports)
2299 return -PA_ERR_NOENTITY;
2300
2301 if (!(port = pa_hashmap_get(s->ports, name)))
2302 return -PA_ERR_NOENTITY;
2303
2304 if (s->active_port == port) {
2305 s->save_port = s->save_port || save;
2306 return 0;
2307 }
2308
2309 if ((s->set_port(s, port)) < 0)
2310 return -PA_ERR_NOENTITY;
2311
2312 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2313
2314 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2315
2316 s->active_port = port;
2317 s->save_port = save;
2318
2319 return 0;
2320 }
2321
2322 /* Called from main context */
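/* Fills in PA_PROP_DEVICE_ICON_NAME, unless it is already set, from the form
 * factor, device class, profile name and bus of the device. For example, a
 * "headset" form factor with an "analog" profile on a "usb" bus yields
 * "audio-headset-analog-usb". */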
2323 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2324 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2325
2326 pa_assert(p);
2327
2328 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2329 return TRUE;
2330
2331 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2332
2333 if (pa_streq(ff, "microphone"))
2334 t = "audio-input-microphone";
2335 else if (pa_streq(ff, "webcam"))
2336 t = "camera-web";
2337 else if (pa_streq(ff, "computer"))
2338 t = "computer";
2339 else if (pa_streq(ff, "handset"))
2340 t = "phone";
2341 else if (pa_streq(ff, "portable"))
2342 t = "multimedia-player";
2343 else if (pa_streq(ff, "tv"))
2344 t = "video-display";
2345
2346 /*
2347 * The following icons are not part of the icon naming spec,
2348 * because Rodney Dawes sucks as the maintainer of that spec.
2349 *
2350 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2351 */
2352 else if (pa_streq(ff, "headset"))
2353 t = "audio-headset";
2354 else if (pa_streq(ff, "headphone"))
2355 t = "audio-headphones";
2356 else if (pa_streq(ff, "speaker"))
2357 t = "audio-speakers";
2358 else if (pa_streq(ff, "hands-free"))
2359 t = "audio-handsfree";
2360 }
2361
2362 if (!t)
2363 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2364 if (pa_streq(c, "modem"))
2365 t = "modem";
2366
2367 if (!t) {
2368 if (is_sink)
2369 t = "audio-card";
2370 else
2371 t = "audio-input-microphone";
2372 }
2373
2374 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2375 if (strstr(profile, "analog"))
2376 s = "-analog";
2377 else if (strstr(profile, "iec958"))
2378 s = "-iec958";
2379 else if (strstr(profile, "hdmi"))
2380 s = "-hdmi";
2381 }
2382
2383 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2384
2385 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2386
2387 return TRUE;
2388 }
2389
2390 pa_bool_t pa_device_init_description(pa_proplist *p) {
2391 const char *s, *d = NULL, *k;
2392 pa_assert(p);
2393
2394 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2395 return TRUE;
2396
2397 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2398 if (pa_streq(s, "internal"))
2399 d = _("Internal Audio");
2400
2401 if (!d)
2402 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2403 if (pa_streq(s, "modem"))
2404 d = _("Modem");
2405
2406 if (!d)
2407 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2408
2409 if (!d)
2410 return FALSE;
2411
2412 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2413
2414 if (d && k)
2415 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2416 else if (d)
2417 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2418
2419 return TRUE;
2420 }
2421
2422 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2423 const char *s;
2424 pa_assert(p);
2425
2426 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2427 return TRUE;
2428
2429 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2430 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2431 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2432 return TRUE;
2433 }
2434
2435 return FALSE;
2436 }