]> code.delx.au - pulseaudio/blob - src/pulsecore/sink.c
core: call pa_sink_get_latency_within_thread() instead of going directly via process_...
[pulseaudio] / src / pulsecore / sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
54
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 pa_zero(*data);
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
149
150 /* Called from main context */
151 static void reset_callbacks(pa_sink *s) {
152 pa_assert(s);
153
154 s->set_state = NULL;
155 s->get_volume = NULL;
156 s->set_volume = NULL;
157 s->get_mute = NULL;
158 s->set_mute = NULL;
159 s->request_rewind = NULL;
160 s->update_requested_latency = NULL;
161 s->set_port = NULL;
162 }
163
/* Called from main context */
/* Create a new sink from the prepared 'data'.
 *
 * Sequence: register the name, fire the SINK_NEW hook (may veto),
 * validate/default the sample spec, channel map, volume and mute state,
 * fire SINK_FIXATE (may veto), initialize all main- and thread-side
 * fields, pick an active port, and finally create the companion
 * "<name>.monitor" source.
 *
 * Returns the new sink, or NULL on failure. Note the FIXME below: on
 * some failure paths after pa_msgobject_new() the partially set-up
 * object is only pa_xfree()d, not fully torn down. */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* Reserve the sink name (policy for clashes comes from
     * data->namereg_fail). */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    /* Modules hooked here may modify 'data' or veto creation entirely. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Derive a default channel map from the channel count if none given. */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    if (!data->volume_is_set)
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit properties from the owning card, if any. */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to tweak the now-complete 'data'. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;

    s->reference_volume = s->virtual_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    /* Prefer the explicitly requested port... */
    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* ...otherwise fall back to the highest-priority port available. */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* Thread-side mirror of the state; consumed by the IO thread. */
    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Create the monitor source that mirrors this sink's output. */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    /* Keep the monitor's latency/rewind parameters in lockstep with ours. */
    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
356
/* Called from main context */
/* Move the sink to 'state'.
 *
 * Order matters: the driver's set_state() callback runs first, then the
 * IO thread is told synchronously via PA_SINK_MESSAGE_SET_STATE; if the
 * IO thread rejects the change the driver callback is rolled back to
 * the original state. On success the STATE_CHANGED hook and a CHANGE
 * subscription event are fired (unless we entered UNLINKED, which
 * pa_sink_unlink() announces itself), and on suspend/resume transitions
 * all inputs and the monitor source are notified.
 *
 * Returns 0 on success or a negative error from the driver/IO thread. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* TRUE iff this transition crosses the suspended/opened boundary. */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* Roll the driver back so both sides agree again. */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the apropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
414
/* Called from main context */
/* Finish sink initialization and make it live: verify the invariants the
 * driver must have established between _new() and _put() (asyncmsgq set,
 * latency range sane, latency/flag agreement with the monitor source),
 * apply the volume-related flag fixups still allowed at this point, move
 * to IDLE, publish the monitor source, and announce the new sink. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow volume related flags to be set
     * between _new() and _put(). */

    /* Pure software volume is always applied in the dB domain. */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
        s->flags |= PA_SINK_DECIBEL_VOLUME;

    /* Flat volume requires dB volume and the global flat_volumes option. */
    if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
        s->flags |= PA_SINK_FLAT_VOLUME;

    /* Re-sync the thread-side copies in case they changed since _new(). */
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;

    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
456
/* Called from main context */
/* Detach the sink from the core: unregister its name, drop it from the
 * core/card idxsets, kill all inputs, move to UNLINKED, reset driver
 * callbacks and unlink the monitor source. See the comments below for
 * the idempotency and _new()-reversal guarantees. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Kill every input; the j/i check asserts each kill actually removed
     * the input from s->inputs (otherwise we'd loop forever). */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
506
/* Called from main context */
/* Final destructor, invoked via parent.parent.free when the refcount
 * hits zero. Unlinks first if needed, then releases the monitor source,
 * input containers, silence block, strings, proplist and ports. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);
    pa_sink_input *i;

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    /* The idxset only references the inputs; unlink killed them already. */
    pa_idxset_free(s->inputs, NULL, NULL);

    /* The thread-side hashmap holds real references — drop them. */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
553
554 /* Called from main context */
555 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
556 pa_sink_assert_ref(s);
557 pa_assert_ctl_context();
558
559 s->asyncmsgq = q;
560
561 if (s->monitor_source)
562 pa_source_set_asyncmsgq(s->monitor_source, q);
563 }
564
565 /* Called from IO context, or before _put() from main context */
566 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
567 pa_sink_assert_ref(s);
568 pa_sink_assert_io_context(s);
569
570 s->thread_info.rtpoll = p;
571
572 if (s->monitor_source)
573 pa_source_set_rtpoll(s->monitor_source, p);
574 }
575
576 /* Called from main context */
577 int pa_sink_update_status(pa_sink*s) {
578 pa_sink_assert_ref(s);
579 pa_assert_ctl_context();
580 pa_assert(PA_SINK_IS_LINKED(s->state));
581
582 if (s->state == PA_SINK_SUSPENDED)
583 return 0;
584
585 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
586 }
587
/* Called from main context */
/* Add or remove 'cause' from the sink's (and its monitor's) suspend-cause
 * bitmask, then move the sink to SUSPENDED if any cause remains, or back
 * to RUNNING/IDLE if none do. Returns 0 if the effective state already
 * matched, otherwise sink_set_state()'s result. */
int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (suspend) {
        s->suspend_cause |= cause;
        s->monitor_source->suspend_cause |= cause;
    } else {
        s->suspend_cause &= ~cause;
        s->monitor_source->suspend_cause &= ~cause;
    }

    /* Already suspended with causes pending, or already running with
     * none — nothing to do. */
    if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return sink_set_state(s, PA_SINK_SUSPENDED);
    else
        return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
613
/* Called from main context */
/* Begin moving every input away from this sink: each input that accepts
 * pa_sink_input_start_move() is referenced and pushed onto 'q' (created
 * if NULL); inputs that refuse are dropped again. The queue owns one
 * reference per input until _finish or _fail is called. Returns 'q'. */
pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
    pa_sink_input *i, *n;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (!q)
        q = pa_queue_new();

    /* Fetch the next input before starting the move, since a started
     * move removes 'i' from s->inputs. */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
        n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));

        pa_sink_input_ref(i);

        if (pa_sink_input_start_move(i) >= 0)
            pa_queue_push(q, i);
        else
            pa_sink_input_unref(i);
    }

    return q;
}
639
640 /* Called from main context */
641 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
642 pa_sink_input *i;
643
644 pa_sink_assert_ref(s);
645 pa_assert_ctl_context();
646 pa_assert(PA_SINK_IS_LINKED(s->state));
647 pa_assert(q);
648
649 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
650 if (pa_sink_input_finish_move(i, s, save) < 0)
651 pa_sink_input_fail_move(i);
652
653 pa_sink_input_unref(i);
654 }
655
656 pa_queue_free(q, NULL, NULL);
657 }
658
659 /* Called from main context */
660 void pa_sink_move_all_fail(pa_queue *q) {
661 pa_sink_input *i;
662
663 pa_assert_ctl_context();
664 pa_assert(q);
665
666 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
667 pa_sink_input_fail_move(i);
668 pa_sink_input_unref(i);
669 }
670
671 pa_queue_free(q, NULL, NULL);
672 }
673
/* Called from IO thread context */
/* Execute a rewind of 'nbytes' bytes: clear the pending-rewind state,
 * forward the rewind to every input, and then to the monitor source.
 * A zero-byte call with no pending request is a no-op; a zero-byte call
 * with a pending request still runs (it cancels the request downstream). */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* If nobody requested this and this is actually no real rewind
     * then we can short cut this. Please note that this means that
     * not all rewind requests triggered upstream will always be
     * translated in actual requests! */
    if (!s->thread_info.rewind_requested && nbytes <= 0)
        return;

    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return;

    if (nbytes > 0)
        pa_log_debug("Processing rewind...");

    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_process_rewind(i, nbytes);
    }

    if (nbytes > 0)
        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
            pa_source_process_rewind(s->monitor_source, nbytes);
}
708
/* Called from IO thread context */
/* Peek up to 'maxinfo' inputs into the 'info' array for mixing.
 *
 * On entry *length is the desired byte count; on exit it is lowered to
 * the shortest chunk any input delivered (so all entries cover the same
 * span). Silent chunks are dropped immediately and don't occupy a slot.
 * Each filled entry holds a reference to its input (in userdata) and to
 * its memblock; inputs_drop() releases them. Returns the number of
 * entries filled. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        /* Track the shortest chunk seen so far (0 means "no limit yet"). */
        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Pure silence contributes nothing to the mix — skip the slot. */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
748
/* Called from IO thread context */
/* After mixing 'result', advance every input by result->length bytes,
 * feed per-input data to any direct outputs on the monitor source,
 * release the references fill_mix_info() took, and finally post the
 * mixed 'result' to the monitor source. 'info'/'n' are the entries
 * fill_mix_info() produced (n may be 0). */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state = NULL;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array */
        /* 'p' persists across iterations and wraps, so when iteration
         * order matches the array order each search hits on the first
         * probe; 'j' only bounds the search to n probes. */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                /* Direct outputs get this input's own (volume-adjusted)
                 * data if it contributed a chunk, otherwise silence. */
                if (m && m->chunk.memblock) {
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release the references fill_mix_info() took for this entry. */
        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
845
846 /* Called from IO thread context */
847 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
848 pa_mix_info info[MAX_MIX_CHANNELS];
849 unsigned n;
850 size_t block_size_max;
851
852 pa_sink_assert_ref(s);
853 pa_sink_assert_io_context(s);
854 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
855 pa_assert(pa_frame_aligned(length, &s->sample_spec));
856 pa_assert(result);
857
858 pa_sink_ref(s);
859
860 pa_assert(!s->thread_info.rewind_requested);
861 pa_assert(s->thread_info.rewind_nbytes == 0);
862
863 if (s->thread_info.state == PA_SINK_SUSPENDED) {
864 result->memblock = pa_memblock_ref(s->silence.memblock);
865 result->index = s->silence.index;
866 result->length = PA_MIN(s->silence.length, length);
867 return;
868 }
869
870 if (length <= 0)
871 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
872
873 block_size_max = pa_mempool_block_size_max(s->core->mempool);
874 if (length > block_size_max)
875 length = pa_frame_align(block_size_max, &s->sample_spec);
876
877 pa_assert(length > 0);
878
879 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
880
881 if (n == 0) {
882
883 *result = s->silence;
884 pa_memblock_ref(result->memblock);
885
886 if (result->length > length)
887 result->length = length;
888
889 } else if (n == 1) {
890 pa_cvolume volume;
891
892 *result = info[0].chunk;
893 pa_memblock_ref(result->memblock);
894
895 if (result->length > length)
896 result->length = length;
897
898 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
899
900 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
901 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
902 pa_memblock_unref(result->memblock);
903 pa_silence_memchunk_get(&s->core->silence_cache,
904 s->core->mempool,
905 result,
906 &s->sample_spec,
907 result->length);
908 } else {
909 pa_memchunk_make_writable(result, 0);
910 pa_volume_memchunk(result, &s->sample_spec, &volume);
911 }
912 }
913 } else {
914 void *ptr;
915 result->memblock = pa_memblock_new(s->core->mempool, length);
916
917 ptr = pa_memblock_acquire(result->memblock);
918 result->length = pa_mix(info, n,
919 ptr, length,
920 &s->sample_spec,
921 &s->thread_info.soft_volume,
922 s->thread_info.soft_muted);
923 pa_memblock_release(result->memblock);
924
925 result->index = 0;
926 }
927
928 inputs_drop(s, info, n, result);
929
930 pa_sink_unref(s);
931 }
932
933 /* Called from IO thread context */
934 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
935 pa_mix_info info[MAX_MIX_CHANNELS];
936 unsigned n;
937 size_t length, block_size_max;
938
939 pa_sink_assert_ref(s);
940 pa_sink_assert_io_context(s);
941 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
942 pa_assert(target);
943 pa_assert(target->memblock);
944 pa_assert(target->length > 0);
945 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
946
947 pa_sink_ref(s);
948
949 pa_assert(!s->thread_info.rewind_requested);
950 pa_assert(s->thread_info.rewind_nbytes == 0);
951
952 if (s->thread_info.state == PA_SINK_SUSPENDED) {
953 pa_silence_memchunk(target, &s->sample_spec);
954 return;
955 }
956
957 length = target->length;
958 block_size_max = pa_mempool_block_size_max(s->core->mempool);
959 if (length > block_size_max)
960 length = pa_frame_align(block_size_max, &s->sample_spec);
961
962 pa_assert(length > 0);
963
964 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
965
966 if (n == 0) {
967 if (target->length > length)
968 target->length = length;
969
970 pa_silence_memchunk(target, &s->sample_spec);
971 } else if (n == 1) {
972 pa_cvolume volume;
973
974 if (target->length > length)
975 target->length = length;
976
977 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
978
979 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
980 pa_silence_memchunk(target, &s->sample_spec);
981 else {
982 pa_memchunk vchunk;
983
984 vchunk = info[0].chunk;
985 pa_memblock_ref(vchunk.memblock);
986
987 if (vchunk.length > length)
988 vchunk.length = length;
989
990 if (!pa_cvolume_is_norm(&volume)) {
991 pa_memchunk_make_writable(&vchunk, 0);
992 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
993 }
994
995 pa_memchunk_memcpy(target, &vchunk);
996 pa_memblock_unref(vchunk.memblock);
997 }
998
999 } else {
1000 void *ptr;
1001
1002 ptr = pa_memblock_acquire(target->memblock);
1003
1004 target->length = pa_mix(info, n,
1005 (uint8_t*) ptr + target->index, length,
1006 &s->sample_spec,
1007 &s->thread_info.soft_volume,
1008 s->thread_info.soft_muted);
1009
1010 pa_memblock_release(target->memblock);
1011 }
1012
1013 inputs_drop(s, info, n, target);
1014
1015 pa_sink_unref(s);
1016 }
1017
/* Called from IO thread context */
/* Like pa_sink_render_into(), but guarantees the whole target is
 * filled: repeatedly renders into the remaining tail (render_into may
 * shorten chunk.length on each pass) until all target->length bytes are
 * written. */
void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
    pa_memchunk chunk;
    size_t l, d;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    /* l = bytes still missing, d = offset of the unfilled tail. */
    l = target->length;
    d = 0;
    while (l > 0) {
        chunk = *target;
        chunk.index += d;
        chunk.length -= d;

        pa_sink_render_into(s, &chunk);

        /* render_into set chunk.length to what it actually produced. */
        d += chunk.length;
        l -= chunk.length;
    }

    pa_sink_unref(s);
}
1051
/* Called from IO thread context */
/* Render exactly 'length' bytes into a freshly produced *result.
 * Phase 1 mixes whatever the inputs deliver right now (length1st may
 * end up shorter than 'length'); phase 2 makes the chunk writable at
 * full size and renders the remainder in-place via pa_sink_render_into()
 * until 'length' bytes are complete. */
void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    size_t length1st = length;
    unsigned n;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(length > 0);
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    pa_assert(length > 0);

    n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        pa_silence_memchunk_get(&s->core->silence_cache,
                                s->core->mempool,
                                result,
                                &s->sample_spec,
                                length1st);
    } else if (n == 1) {
        pa_cvolume volume;

        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        /* NOTE(review): this branch truncates/copies with 'length'
         * rather than 'length1st' — looks inconsistent with the n==0
         * and n>1 branches; verify against upstream history. */
        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
            if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
                pa_memblock_unref(result->memblock);
                pa_silence_memchunk_get(&s->core->silence_cache,
                                        s->core->mempool,
                                        result,
                                        &s->sample_spec,
                                        result->length);
            } else {
                pa_memchunk_make_writable(result, length);
                pa_volume_memchunk(result, &s->sample_spec, &volume);
            }
        }
    } else {
        void *ptr;

        result->index = 0;
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);

        result->length = pa_mix(info, n,
                                (uint8_t*) ptr + result->index, length1st,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(result->memblock);
    }

    inputs_drop(s, info, n, result);

    /* Phase 2: top up the chunk until the full 'length' is rendered. */
    if (result->length < length) {
        pa_memchunk chunk;
        size_t l, d;
        pa_memchunk_make_writable(result, length);

        l = length - result->length;
        d = result->index + result->length;
        while (l > 0) {
            chunk = *result;
            chunk.index = d;
            chunk.length = l;

            pa_sink_render_into(s, &chunk);

            d += chunk.length;
            l -= chunk.length;
        }
        result->length = length;
    }

    pa_sink_unref(s);
}
1145
1146 /* Called from main thread */
1147 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1148 pa_usec_t usec = 0;
1149
1150 pa_sink_assert_ref(s);
1151 pa_assert_ctl_context();
1152 pa_assert(PA_SINK_IS_LINKED(s->state));
1153
1154 /* The returned value is supposed to be in the time domain of the sound card! */
1155
1156 if (s->state == PA_SINK_SUSPENDED)
1157 return 0;
1158
1159 if (!(s->flags & PA_SINK_LATENCY))
1160 return 0;
1161
1162 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1163
1164 return usec;
1165 }
1166
1167 /* Called from IO thread */
1168 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1169 pa_usec_t usec = 0;
1170 pa_msgobject *o;
1171
1172 pa_sink_assert_ref(s);
1173 pa_sink_assert_io_context(s);
1174 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1175
1176 /* The returned value is supposed to be in the time domain of the sound card! */
1177
1178 if (s->thread_info.state == PA_SINK_SUSPENDED)
1179 return 0;
1180
1181 if (!(s->flags & PA_SINK_LATENCY))
1182 return 0;
1183
1184 o = PA_MSGOBJECT(s);
1185
1186 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1187
1188 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1189 return -1;
1190
1191 return usec;
1192 }
1193
/* Called from main thread (both callers assert the control context).
 *
 * Recomputes a sink input's relative and soft volume against the
 * given candidate sink volume, which must already be remapped to the
 * input's channel map:
 *
 *   i->relative_volume := i->virtual_volume / new_volume
 *   i->soft_volume     := i->relative_volume * i->volume_factor
 */
static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
    unsigned c;

    pa_sink_input_assert_ref(i);
    pa_assert(new_volume->channels == i->sample_spec.channels);

    /*
     * This basically calculates:
     *
     * i->relative_volume := i->virtual_volume / new_volume
     * i->soft_volume := i->relative_volume * i->volume_factor
     */

    /* The new sink volume passed in here must already be remapped to
     * the sink input's channel map! */

    i->soft_volume.channels = i->sample_spec.channels;

    for (c = 0; c < i->sample_spec.channels; c++)

        /* This also guards the division below against a muted
         * (presumably zero-linear) denominator */
        if (new_volume->values[c] <= PA_VOLUME_MUTED)
            /* We leave i->relative_volume untouched */
            i->soft_volume.values[c] = PA_VOLUME_MUTED;
        else {
            i->relative_volume[c] =
                pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
                pa_sw_volume_to_linear(new_volume->values[c]);

            i->soft_volume.values[c] = pa_sw_volume_from_linear(
                i->relative_volume[c] *
                pa_sw_volume_to_linear(i->volume_factor.values[c]));
        }

    /* Hooks have the ability to play games with i->soft_volume */
    pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);

    /* We don't copy the soft_volume to the thread_info data
     * here. That must be done by the caller */
}
1233
1234 /* Called from main thread */
1235 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1236 pa_sink_input *i;
1237 uint32_t idx;
1238
1239 pa_sink_assert_ref(s);
1240 pa_assert_ctl_context();
1241 pa_assert(new_volume);
1242 pa_assert(PA_SINK_IS_LINKED(s->state));
1243 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1244
1245 /* This is called whenever a sink input volume changes or a sink
1246 * input is added/removed and we might need to fix up the sink
1247 * volume accordingly. Please note that we don't actually update
1248 * the sinks volume here, we only return how it needs to be
1249 * updated. The caller should then call pa_sink_set_volume().*/
1250
1251 if (pa_idxset_isempty(s->inputs)) {
1252 /* In the special case that we have no sink input we leave the
1253 * volume unmodified. */
1254 *new_volume = s->reference_volume;
1255 return;
1256 }
1257
1258 pa_cvolume_mute(new_volume, s->channel_map.channels);
1259
1260 /* First let's determine the new maximum volume of all inputs
1261 * connected to this sink */
1262 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1263 unsigned c;
1264 pa_cvolume remapped_volume;
1265
1266 remapped_volume = i->virtual_volume;
1267 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1268
1269 for (c = 0; c < new_volume->channels; c++)
1270 if (remapped_volume.values[c] > new_volume->values[c])
1271 new_volume->values[c] = remapped_volume.values[c];
1272 }
1273
1274 /* Then, let's update the soft volumes of all inputs connected
1275 * to this sink */
1276 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1277 pa_cvolume remapped_new_volume;
1278
1279 remapped_new_volume = *new_volume;
1280 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1281 compute_new_soft_volume(i, &remapped_new_volume);
1282
1283 /* We don't copy soft_volume to the thread_info data here
1284 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1285 * want the update to be atomically with the sink volume
1286 * update, hence we do it within the pa_sink_set_volume() call
1287 * below */
1288 }
1289 }
1290
/* Called from main thread.
 *
 * Pushes a sink volume change (one not caused by a sink input volume
 * change) down to the connected sink inputs, preserving each input's
 * relative volume, and syncs the resulting soft volumes to the IO
 * thread. */
void pa_sink_propagate_flat_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    /* This is called whenever the sink volume changes that is not
     * caused by a sink input volume change. We need to fix up the
     * sink input volumes accordingly */

    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
        pa_cvolume sink_volume, new_virtual_volume;
        unsigned c;

        /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */

        /* sink_volume is remapped to the input's channel map here, as
         * compute_new_soft_volume() below requires */
        sink_volume = s->virtual_volume;
        pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);

        for (c = 0; c < i->sample_spec.channels; c++)
            new_virtual_volume.values[c] = pa_sw_volume_from_linear(
                i->relative_volume[c] *
                pa_sw_volume_to_linear(sink_volume.values[c]));

        new_virtual_volume.channels = i->sample_spec.channels;

        if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
            i->virtual_volume = new_virtual_volume;

            /* Hmm, the soft volume might no longer actually match
             * what has been chosen as new virtual volume here,
             * especially when the old volume was
             * PA_VOLUME_MUTED. Hence let's recalculate the soft
             * volumes here. */
            compute_new_soft_volume(i, &sink_volume);

            /* The virtual volume changed, let's tell people so */
            pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
        }
    }

    /* If the soft_volume of any of the sink inputs got changed, let's
     * make sure the thread copies are synced up. */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
}
1340
/* Called from main thread.
 *
 * Applies a new virtual volume to the sink.
 *   propagate:        also push the change down to the sink inputs
 *                     (only effective in flat volume mode)
 *   sendmsg:          notify the IO thread of the new soft/virtual volume
 *   become_reference: also store the new volume as reference volume
 *   save:             mark the volume as changed-by-user (presumably so a
 *                     restore module persists it -- verify against callers)
 */
void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference, pa_bool_t save) {
    pa_bool_t virtual_volume_changed;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(volume);
    pa_assert(pa_cvolume_valid(volume));
    pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));

    virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
    s->virtual_volume = *volume;
    /* Keep the save flag if nothing changed; force it when requested */
    s->save_volume = (!virtual_volume_changed && s->save_volume) || save;

    if (become_reference)
        s->reference_volume = s->virtual_volume;

    /* Propagate this volume change back to the inputs */
    if (virtual_volume_changed)
        if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
            pa_sink_propagate_flat_volume(s);

    if (s->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to s->soft_volume */

        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
        s->set_volume(s);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the virtual volume */
        s->soft_volume = s->virtual_volume;

    /* This tells the sink that soft and/or virtual volume changed */
    if (sendmsg)
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);

    if (virtual_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1384
1385 /* Called from main thread. Only to be called by sink implementor */
1386 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1387 pa_sink_assert_ref(s);
1388 pa_assert_ctl_context();
1389 pa_assert(volume);
1390
1391 s->soft_volume = *volume;
1392
1393 if (PA_SINK_IS_LINKED(s->state))
1394 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1395 else
1396 s->thread_info.soft_volume = *volume;
1397 }
1398
/* Called from main thread.
 *
 * Returns the sink's current volume; with force_refresh (or when the
 * sink asked for refreshes via s->refresh_volume) the hardware volume
 * is re-read first, and any externally caused change is propagated
 * and announced. 'reference' selects whether the reference or the
 * virtual volume is returned. */
const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_virtual_volume = s->virtual_volume;

        /* Let the implementor update s->virtual_volume from the hardware */
        if (s->get_volume)
            s->get_volume(s);

        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {

            s->reference_volume = s->virtual_volume;

            /* Something got changed in the hardware. It probably
             * makes sense to save changed hw settings given that hw
             * volume changes not triggered by PA are almost certainly
             * done by the user. */
            s->save_volume = TRUE;

            if (s->flags & PA_SINK_FLAT_VOLUME)
                pa_sink_propagate_flat_volume(s);

            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
        }
    }

    return reference ? &s->reference_volume : &s->virtual_volume;
}
1432
1433 /* Called from main thread */
1434 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume) {
1435 pa_sink_assert_ref(s);
1436 pa_assert_ctl_context();
1437 pa_assert(PA_SINK_IS_LINKED(s->state));
1438
1439 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1440 if (pa_cvolume_equal(&s->virtual_volume, new_volume))
1441 return;
1442
1443 s->reference_volume = s->virtual_volume = *new_volume;
1444 s->save_volume = TRUE;
1445
1446 if (s->flags & PA_SINK_FLAT_VOLUME)
1447 pa_sink_propagate_flat_volume(s);
1448
1449 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1450 }
1451
1452 /* Called from main thread */
1453 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1454 pa_bool_t old_muted;
1455
1456 pa_sink_assert_ref(s);
1457 pa_assert_ctl_context();
1458 pa_assert(PA_SINK_IS_LINKED(s->state));
1459
1460 old_muted = s->muted;
1461 s->muted = mute;
1462 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1463
1464 if (s->set_mute)
1465 s->set_mute(s);
1466
1467 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1468
1469 if (old_muted != s->muted)
1470 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1471 }
1472
1473 /* Called from main thread */
1474 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1475
1476 pa_sink_assert_ref(s);
1477 pa_assert_ctl_context();
1478 pa_assert(PA_SINK_IS_LINKED(s->state));
1479
1480 if (s->refresh_muted || force_refresh) {
1481 pa_bool_t old_muted = s->muted;
1482
1483 if (s->get_mute)
1484 s->get_mute(s);
1485
1486 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1487
1488 if (old_muted != s->muted) {
1489 s->save_muted = TRUE;
1490
1491 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1492
1493 /* Make sure the soft mute status stays in sync */
1494 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1495 }
1496 }
1497
1498
1499 return s->muted;
1500 }
1501
/* Called from main thread */
void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    /* The sink implementor may call this if the mute state changed to
     * make sure everyone is notified (the previous comment said
     * "volume" -- a copy/paste from pa_sink_volume_changed()) */

    if (s->muted == new_muted)
        return;

    s->muted = new_muted;
    /* The change happened outside PA, so remember it */
    s->save_muted = TRUE;

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1518
1519 /* Called from main thread */
1520 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1521 pa_sink_assert_ref(s);
1522 pa_assert_ctl_context();
1523
1524 if (p)
1525 pa_proplist_update(s->proplist, mode, p);
1526
1527 if (PA_SINK_IS_LINKED(s->state)) {
1528 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1529 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1530 }
1531
1532 return TRUE;
1533 }
1534
1535 /* Called from main thread */
1536 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1537 void pa_sink_set_description(pa_sink *s, const char *description) {
1538 const char *old;
1539 pa_sink_assert_ref(s);
1540 pa_assert_ctl_context();
1541
1542 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1543 return;
1544
1545 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1546
1547 if (old && description && pa_streq(old, description))
1548 return;
1549
1550 if (description)
1551 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1552 else
1553 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1554
1555 if (s->monitor_source) {
1556 char *n;
1557
1558 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1559 pa_source_set_description(s->monitor_source, n);
1560 pa_xfree(n);
1561 }
1562
1563 if (PA_SINK_IS_LINKED(s->state)) {
1564 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1565 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1566 }
1567 }
1568
1569 /* Called from main thread */
1570 unsigned pa_sink_linked_by(pa_sink *s) {
1571 unsigned ret;
1572
1573 pa_sink_assert_ref(s);
1574 pa_assert_ctl_context();
1575 pa_assert(PA_SINK_IS_LINKED(s->state));
1576
1577 ret = pa_idxset_size(s->inputs);
1578
1579 /* We add in the number of streams connected to us here. Please
1580 * note the asymmmetry to pa_sink_used_by()! */
1581
1582 if (s->monitor_source)
1583 ret += pa_source_linked_by(s->monitor_source);
1584
1585 return ret;
1586 }
1587
1588 /* Called from main thread */
1589 unsigned pa_sink_used_by(pa_sink *s) {
1590 unsigned ret;
1591
1592 pa_sink_assert_ref(s);
1593 pa_assert_ctl_context();
1594 pa_assert(PA_SINK_IS_LINKED(s->state));
1595
1596 ret = pa_idxset_size(s->inputs);
1597 pa_assert(ret >= s->n_corked);
1598
1599 /* Streams connected to our monitor source do not matter for
1600 * pa_sink_used_by()!.*/
1601
1602 return ret - s->n_corked;
1603 }
1604
1605 /* Called from main thread */
1606 unsigned pa_sink_check_suspend(pa_sink *s) {
1607 unsigned ret;
1608 pa_sink_input *i;
1609 uint32_t idx;
1610
1611 pa_sink_assert_ref(s);
1612 pa_assert_ctl_context();
1613
1614 if (!PA_SINK_IS_LINKED(s->state))
1615 return 0;
1616
1617 ret = 0;
1618
1619 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1620 pa_sink_input_state_t st;
1621
1622 st = pa_sink_input_get_state(i);
1623 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1624
1625 if (st == PA_SINK_INPUT_CORKED)
1626 continue;
1627
1628 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1629 continue;
1630
1631 ret ++;
1632 }
1633
1634 if (s->monitor_source)
1635 ret += pa_source_check_suspend(s->monitor_source);
1636
1637 return ret;
1638 }
1639
1640 /* Called from the IO thread */
1641 static void sync_input_volumes_within_thread(pa_sink *s) {
1642 pa_sink_input *i;
1643 void *state = NULL;
1644
1645 pa_sink_assert_ref(s);
1646 pa_sink_assert_io_context(s);
1647
1648 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1649 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1650 continue;
1651
1652 i->thread_info.soft_volume = i->soft_volume;
1653 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1654 }
1655 }
1656
/* Called from IO thread, except when it is not */
/* Central message dispatcher for the sink: handles input
 * add/remove, stream moves, volume/mute synchronization, state
 * changes and the latency/rewind parameter queries. Returns 0 on
 * success, -1 for unhandled codes -- notably
 * PA_SINK_MESSAGE_GET_LATENCY, which the sink implementor is
 * expected to handle before chaining up to this function. */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Wire up the IO-thread view of the sync chain, if any */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be
             * fixed up and then configured on the sink */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming. */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            /* The main-thread pointers must already be cleared; the
             * IO-thread copies may still hold the old links and are
             * unspliced here */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                /* Ask the input to take back everything that is still
                 * queued, so it can be replayed on the new sink */
                if (total_nbytes > 0) {
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                /* Skip ahead by the new sink's latency so playback
                 * stays in time, and rewind by the same amount */
                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (!(s->flags & PA_SINK_FLAT_VOLUME))
                return 0;

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:
            /* Nothing to do here; the main thread reads s->virtual_volume
             * after this synchronization point */
            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:
            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* TRUE iff we transition between suspended and opened */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                /* A pending rewind makes no sense while suspended */
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* (pa_usec_t) -1 means "no stream requested anything":
             * report the maximum instead */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            ;
    }

    /* Unknown code, or a code the implementor must handle itself */
    return -1;
}
1980
1981 /* Called from main thread */
1982 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
1983 pa_sink *sink;
1984 uint32_t idx;
1985 int ret = 0;
1986
1987 pa_core_assert_ref(c);
1988 pa_assert_ctl_context();
1989 pa_assert(cause != 0);
1990
1991 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
1992 int r;
1993
1994 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
1995 ret = r;
1996 }
1997
1998 return ret;
1999 }
2000
2001 /* Called from main thread */
2002 void pa_sink_detach(pa_sink *s) {
2003 pa_sink_assert_ref(s);
2004 pa_assert_ctl_context();
2005 pa_assert(PA_SINK_IS_LINKED(s->state));
2006
2007 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
2008 }
2009
2010 /* Called from main thread */
2011 void pa_sink_attach(pa_sink *s) {
2012 pa_sink_assert_ref(s);
2013 pa_assert_ctl_context();
2014 pa_assert(PA_SINK_IS_LINKED(s->state));
2015
2016 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
2017 }
2018
2019 /* Called from IO thread */
2020 void pa_sink_detach_within_thread(pa_sink *s) {
2021 pa_sink_input *i;
2022 void *state = NULL;
2023
2024 pa_sink_assert_ref(s);
2025 pa_sink_assert_io_context(s);
2026 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2027
2028 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2029 if (i->detach)
2030 i->detach(i);
2031
2032 if (s->monitor_source)
2033 pa_source_detach_within_thread(s->monitor_source);
2034 }
2035
2036 /* Called from IO thread */
2037 void pa_sink_attach_within_thread(pa_sink *s) {
2038 pa_sink_input *i;
2039 void *state = NULL;
2040
2041 pa_sink_assert_ref(s);
2042 pa_sink_assert_io_context(s);
2043 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2044
2045 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2046 if (i->attach)
2047 i->attach(i);
2048
2049 if (s->monitor_source)
2050 pa_source_attach_within_thread(s->monitor_source);
2051 }
2052
2053 /* Called from IO thread */
2054 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2055 pa_sink_assert_ref(s);
2056 pa_sink_assert_io_context(s);
2057 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2058
2059 if (s->thread_info.state == PA_SINK_SUSPENDED)
2060 return;
2061
2062 if (nbytes == (size_t) -1)
2063 nbytes = s->thread_info.max_rewind;
2064
2065 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2066
2067 if (s->thread_info.rewind_requested &&
2068 nbytes <= s->thread_info.rewind_nbytes)
2069 return;
2070
2071 s->thread_info.rewind_nbytes = nbytes;
2072 s->thread_info.rewind_requested = TRUE;
2073
2074 if (s->request_rewind)
2075 s->request_rewind(s);
2076 }
2077
/* Called from IO thread.
 *
 * Returns the effective latency requested of this sink: the minimum
 * over all inputs' requests and the monitor source's request, clamped
 * to [min_latency, max_latency]. (pa_usec_t) -1 means nobody
 * requested anything. The result is cached until invalidated. */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Without dynamic latency support the fixed latency always wins */
    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Take the smallest latency any input asked for */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
2116
2117 /* Called from main thread */
2118 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2119 pa_usec_t usec = 0;
2120
2121 pa_sink_assert_ref(s);
2122 pa_assert_ctl_context();
2123 pa_assert(PA_SINK_IS_LINKED(s->state));
2124
2125 if (s->state == PA_SINK_SUSPENDED)
2126 return 0;
2127
2128 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2129 return usec;
2130 }
2131
2132 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2133 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2134 pa_sink_input *i;
2135 void *state = NULL;
2136
2137 pa_sink_assert_ref(s);
2138 pa_sink_assert_io_context(s);
2139
2140 if (max_rewind == s->thread_info.max_rewind)
2141 return;
2142
2143 s->thread_info.max_rewind = max_rewind;
2144
2145 if (PA_SINK_IS_LINKED(s->thread_info.state))
2146 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2147 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2148
2149 if (s->monitor_source)
2150 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2151 }
2152
2153 /* Called from main thread */
2154 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2155 pa_sink_assert_ref(s);
2156 pa_assert_ctl_context();
2157
2158 if (PA_SINK_IS_LINKED(s->state))
2159 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2160 else
2161 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2162 }
2163
2164 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2165 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2166 void *state = NULL;
2167
2168 pa_sink_assert_ref(s);
2169 pa_sink_assert_io_context(s);
2170
2171 if (max_request == s->thread_info.max_request)
2172 return;
2173
2174 s->thread_info.max_request = max_request;
2175
2176 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2177 pa_sink_input *i;
2178
2179 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2180 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2181 }
2182 }
2183
2184 /* Called from main thread */
2185 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2186 pa_sink_assert_ref(s);
2187 pa_assert_ctl_context();
2188
2189 if (PA_SINK_IS_LINKED(s->state))
2190 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2191 else
2192 pa_sink_set_max_request_within_thread(s, max_request);
2193 }
2194
/* Called from IO thread */
/* Drops the cached requested-latency value and notifies the sink
 * implementation plus all connected inputs that the requested latency
 * may have changed.
 *
 * 'dynamic' marks invalidations triggered by a dynamic latency request
 * (e.g. an input changing its demand): for sinks without
 * PA_SINK_DYNAMIC_LATENCY such invalidations are irrelevant and are
 * dropped early; non-dynamic causes (e.g. a latency range change) still
 * fan out the notifications below. */
void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Only dynamic-latency sinks actually cache a requested latency
     * worth invalidating. */
    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = FALSE;
    else if (dynamic)
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        /* Let the sink implementation react first... */
        if (s->update_requested_latency)
            s->update_requested_latency(s);

        /* ...then every input that registered a callback. */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
2218
2219 /* Called from main thread */
2220 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2221 pa_sink_assert_ref(s);
2222 pa_assert_ctl_context();
2223
2224 /* min_latency == 0: no limit
2225 * min_latency anything else: specified limit
2226 *
2227 * Similar for max_latency */
2228
2229 if (min_latency < ABSOLUTE_MIN_LATENCY)
2230 min_latency = ABSOLUTE_MIN_LATENCY;
2231
2232 if (max_latency <= 0 ||
2233 max_latency > ABSOLUTE_MAX_LATENCY)
2234 max_latency = ABSOLUTE_MAX_LATENCY;
2235
2236 pa_assert(min_latency <= max_latency);
2237
2238 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2239 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2240 max_latency == ABSOLUTE_MAX_LATENCY) ||
2241 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2242
2243 if (PA_SINK_IS_LINKED(s->state)) {
2244 pa_usec_t r[2];
2245
2246 r[0] = min_latency;
2247 r[1] = max_latency;
2248
2249 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2250 } else
2251 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2252 }
2253
2254 /* Called from main thread */
2255 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2256 pa_sink_assert_ref(s);
2257 pa_assert_ctl_context();
2258 pa_assert(min_latency);
2259 pa_assert(max_latency);
2260
2261 if (PA_SINK_IS_LINKED(s->state)) {
2262 pa_usec_t r[2] = { 0, 0 };
2263
2264 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2265
2266 *min_latency = r[0];
2267 *max_latency = r[1];
2268 } else {
2269 *min_latency = s->thread_info.min_latency;
2270 *max_latency = s->thread_info.max_latency;
2271 }
2272 }
2273
/* Called from IO thread */
/* Applies a new latency range to the thread data, then (in this order)
 * notifies inputs, invalidates the cached requested latency, and
 * propagates the range to the monitor source. Callers must pass values
 * already clamped to the absolute bounds. */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    /* Inputs observe the new range via their callback. */
    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    /* The cached requested latency was clamped against the old range. */
    pa_sink_invalidate_requested_latency(s, FALSE);

    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
2305
2306 /* Called from main thread */
2307 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2308 pa_sink_assert_ref(s);
2309 pa_assert_ctl_context();
2310
2311 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2312 pa_assert(latency == 0);
2313 return;
2314 }
2315
2316 if (latency < ABSOLUTE_MIN_LATENCY)
2317 latency = ABSOLUTE_MIN_LATENCY;
2318
2319 if (latency > ABSOLUTE_MAX_LATENCY)
2320 latency = ABSOLUTE_MAX_LATENCY;
2321
2322 if (PA_SINK_IS_LINKED(s->state))
2323 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2324 else
2325 s->thread_info.fixed_latency = latency;
2326
2327 pa_source_set_fixed_latency(s->monitor_source, latency);
2328 }
2329
2330 /* Called from main thread */
2331 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2332 pa_usec_t latency;
2333
2334 pa_sink_assert_ref(s);
2335 pa_assert_ctl_context();
2336
2337 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2338 return 0;
2339
2340 if (PA_SINK_IS_LINKED(s->state))
2341 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2342 else
2343 latency = s->thread_info.fixed_latency;
2344
2345 return latency;
2346 }
2347
/* Called from IO thread */
/* Applies a new fixed latency to the thread data, then (in this order)
 * notifies inputs, invalidates the cached requested latency, and
 * mirrors the value onto the monitor source. Dynamic-latency sinks must
 * pass 0 (the call is then a no-op). */
void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    /* Callers are expected to clamp before handing the value over. */
    pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(latency <= ABSOLUTE_MAX_LATENCY);

    if (s->thread_info.fixed_latency == latency)
        return;

    s->thread_info.fixed_latency = latency;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_fixed_latency)
                i->update_sink_fixed_latency(i);
    }

    /* The cached requested latency was derived from the old value. */
    pa_sink_invalidate_requested_latency(s, FALSE);

    pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
}
2379
2380 /* Called from main context */
2381 size_t pa_sink_get_max_rewind(pa_sink *s) {
2382 size_t r;
2383 pa_sink_assert_ref(s);
2384 pa_assert_ctl_context();
2385
2386 if (!PA_SINK_IS_LINKED(s->state))
2387 return s->thread_info.max_rewind;
2388
2389 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2390
2391 return r;
2392 }
2393
2394 /* Called from main context */
2395 size_t pa_sink_get_max_request(pa_sink *s) {
2396 size_t r;
2397 pa_sink_assert_ref(s);
2398 pa_assert_ctl_context();
2399
2400 if (!PA_SINK_IS_LINKED(s->state))
2401 return s->thread_info.max_request;
2402
2403 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2404
2405 return r;
2406 }
2407
2408 /* Called from main context */
2409 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2410 pa_device_port *port;
2411
2412 pa_sink_assert_ref(s);
2413 pa_assert_ctl_context();
2414
2415 if (!s->set_port) {
2416 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2417 return -PA_ERR_NOTIMPLEMENTED;
2418 }
2419
2420 if (!s->ports)
2421 return -PA_ERR_NOENTITY;
2422
2423 if (!(port = pa_hashmap_get(s->ports, name)))
2424 return -PA_ERR_NOENTITY;
2425
2426 if (s->active_port == port) {
2427 s->save_port = s->save_port || save;
2428 return 0;
2429 }
2430
2431 if ((s->set_port(s, port)) < 0)
2432 return -PA_ERR_NOENTITY;
2433
2434 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2435
2436 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2437
2438 s->active_port = port;
2439 s->save_port = save;
2440
2441 return 0;
2442 }
2443
2444 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2445 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2446
2447 pa_assert(p);
2448
2449 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2450 return TRUE;
2451
2452 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2453
2454 if (pa_streq(ff, "microphone"))
2455 t = "audio-input-microphone";
2456 else if (pa_streq(ff, "webcam"))
2457 t = "camera-web";
2458 else if (pa_streq(ff, "computer"))
2459 t = "computer";
2460 else if (pa_streq(ff, "handset"))
2461 t = "phone";
2462 else if (pa_streq(ff, "portable"))
2463 t = "multimedia-player";
2464 else if (pa_streq(ff, "tv"))
2465 t = "video-display";
2466
2467 /*
2468 * The following icons are not part of the icon naming spec,
2469 * because Rodney Dawes sucks as the maintainer of that spec.
2470 *
2471 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2472 */
2473 else if (pa_streq(ff, "headset"))
2474 t = "audio-headset";
2475 else if (pa_streq(ff, "headphone"))
2476 t = "audio-headphones";
2477 else if (pa_streq(ff, "speaker"))
2478 t = "audio-speakers";
2479 else if (pa_streq(ff, "hands-free"))
2480 t = "audio-handsfree";
2481 }
2482
2483 if (!t)
2484 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2485 if (pa_streq(c, "modem"))
2486 t = "modem";
2487
2488 if (!t) {
2489 if (is_sink)
2490 t = "audio-card";
2491 else
2492 t = "audio-input-microphone";
2493 }
2494
2495 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2496 if (strstr(profile, "analog"))
2497 s = "-analog";
2498 else if (strstr(profile, "iec958"))
2499 s = "-iec958";
2500 else if (strstr(profile, "hdmi"))
2501 s = "-hdmi";
2502 }
2503
2504 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2505
2506 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2507
2508 return TRUE;
2509 }
2510
2511 pa_bool_t pa_device_init_description(pa_proplist *p) {
2512 const char *s, *d = NULL, *k;
2513 pa_assert(p);
2514
2515 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2516 return TRUE;
2517
2518 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2519 if (pa_streq(s, "internal"))
2520 d = _("Internal Audio");
2521
2522 if (!d)
2523 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2524 if (pa_streq(s, "modem"))
2525 d = _("Modem");
2526
2527 if (!d)
2528 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2529
2530 if (!d)
2531 return FALSE;
2532
2533 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2534
2535 if (d && k)
2536 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2537 else if (d)
2538 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2539
2540 return TRUE;
2541 }
2542
2543 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2544 const char *s;
2545 pa_assert(p);
2546
2547 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2548 return TRUE;
2549
2550 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2551 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2552 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2553 return TRUE;
2554 }
2555
2556 return FALSE;
2557 }