/*
 * pulseaudio: src/pulsecore/sink.c
 * core: introduce pa_{sink|source}_update_flags()
 */
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
54
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 pa_zero(*data);
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
149
/* Called from main context */
/* Clear every implementation-provided callback on the sink.  Used during
 * construction (pa_sink_new) and teardown (pa_sink_unlink) so that no
 * backend callback can fire on a sink that is not fully set up. */
static void reset_callbacks(pa_sink *s) {
    pa_assert(s);

    s->set_state = NULL;
    s->get_volume = NULL;
    s->set_volume = NULL;
    s->get_mute = NULL;
    s->set_mute = NULL;
    s->request_rewind = NULL;
    s->update_requested_latency = NULL;
    s->set_port = NULL;
}
163
/* Called from main context */
/* Create a new sink from the construction data in 'data'.  Registers the
 * sink's name, fires the NEW and FIXATE hooks (either may veto creation or
 * adjust 'data'), fills in defaults for unset fields, creates the matching
 * ".monitor" source and inserts the sink into the core's sink set.
 * Returns NULL on failure.  The returned sink is in PA_SINK_INIT state;
 * the caller must still call pa_sink_put(). */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* Reserve the name; the registry may return a modified (unique) name,
     * which is copied back into 'data' below */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    /* Modules may modify 'data' or veto creation entirely here */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Default the channel map from the channel count if unset */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    if (!data->volume_is_set)
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit properties from the owning card, if any */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to adjust 'data' before it is frozen */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;

    s->reference_volume = s->virtual_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    /* Use the explicitly requested port if it exists... */
    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    /* ...otherwise default to the highest-priority port */
    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* IO-thread-side mirror of the state set up above */
    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Every sink comes with a monitor source mirroring its output */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    /* The monitor inherits the sink's latency-related flags */
    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
356
/* Called from main context */
/* Transition the sink to 'state'.  The implementation's set_state()
 * callback runs first; then the change is sent synchronously to the IO
 * thread.  If the IO thread rejects it, the callback is rolled back to the
 * original state.  On success the change is announced via hook and
 * subscription events (except when entering UNLINKED, which
 * pa_sink_unlink() announces itself), and suspend/resume transitions are
 * propagated to all inputs and the monitor source. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* TRUE iff this transition crosses the suspended/opened boundary */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* IO thread refused: undo the set_state() call above */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the apropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        /* Inputs flagged KILL_ON_SUSPEND are killed outright when we
         * suspend; all others get their suspend() callback, if set */
        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
414
/* Called from main context */
/* Complete the two-phase initialization started by pa_sink_new(): check
 * that the implementor filled in the mandatory fields, finalize the
 * volume-related flags, move the sink from INIT to IDLE and announce it to
 * hooks and subscribers. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow volume related flags to be set
     * between _new() and _put(). */

    /* No hardware volume control implies software (dB-capable) volume */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
        s->flags |= PA_SINK_DECIBEL_VOLUME;

    if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
        s->flags |= PA_SINK_FLAT_VOLUME;

    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;

    /* Sanity-check the flag/latency invariants, including consistency
     * with the monitor source */
    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
456
/* Called from main context */
/* Disconnect the sink from the core: fire the UNLINK hook, unregister the
 * name, remove the sink from the core/card sets, kill all remaining
 * inputs, enter the UNLINKED state, clear the backend callbacks and unlink
 * the monitor source. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Kill all remaining inputs; 'j' asserts that each kill actually
     * removed its input, so this loop cannot spin forever */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
506
/* Called from main context */
/* Destructor, invoked when the last reference is dropped.  Unlinks the
 * sink if that has not happened yet, then releases the monitor source,
 * remaining IO-thread input references, the silence chunk and every owned
 * string, hashmap and port. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);
    pa_sink_input *i;

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    pa_idxset_free(s->inputs, NULL, NULL);

    /* The IO-thread view may still hold input references; drop them
     * before freeing the hashmap itself */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
553
554 /* Called from main context, and not while the IO thread is active, please */
555 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
556 pa_sink_assert_ref(s);
557 pa_assert_ctl_context();
558
559 s->asyncmsgq = q;
560
561 if (s->monitor_source)
562 pa_source_set_asyncmsgq(s->monitor_source, q);
563 }
564
565 /* Called from main context, and not while the IO thread is active, please */
566 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
567 pa_sink_assert_ref(s);
568 pa_assert_ctl_context();
569
570 if (mask == 0)
571 return;
572
573 /* For now, allow only a minimal set of flags to be changed. */
574 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
575
576 s->flags = (s->flags & ~mask) | (value & mask);
577
578 pa_source_update_flags(s->monitor_source,
579 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
580 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
581 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
582 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
583 }
584
585 /* Called from IO context, or before _put() from main context */
586 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
587 pa_sink_assert_ref(s);
588 pa_sink_assert_io_context(s);
589
590 s->thread_info.rtpoll = p;
591
592 if (s->monitor_source)
593 pa_source_set_rtpoll(s->monitor_source, p);
594 }
595
596 /* Called from main context */
597 int pa_sink_update_status(pa_sink*s) {
598 pa_sink_assert_ref(s);
599 pa_assert_ctl_context();
600 pa_assert(PA_SINK_IS_LINKED(s->state));
601
602 if (s->state == PA_SINK_SUSPENDED)
603 return 0;
604
605 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
606 }
607
608 /* Called from main context */
609 int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
610 pa_sink_assert_ref(s);
611 pa_assert_ctl_context();
612 pa_assert(PA_SINK_IS_LINKED(s->state));
613 pa_assert(cause != 0);
614
615 if (suspend) {
616 s->suspend_cause |= cause;
617 s->monitor_source->suspend_cause |= cause;
618 } else {
619 s->suspend_cause &= ~cause;
620 s->monitor_source->suspend_cause &= ~cause;
621 }
622
623 if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
624 return 0;
625
626 pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
627
628 if (s->suspend_cause)
629 return sink_set_state(s, PA_SINK_SUSPENDED);
630 else
631 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
632 }
633
634 /* Called from main context */
635 pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
636 pa_sink_input *i, *n;
637 uint32_t idx;
638
639 pa_sink_assert_ref(s);
640 pa_assert_ctl_context();
641 pa_assert(PA_SINK_IS_LINKED(s->state));
642
643 if (!q)
644 q = pa_queue_new();
645
646 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
647 n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
648
649 pa_sink_input_ref(i);
650
651 if (pa_sink_input_start_move(i) >= 0)
652 pa_queue_push(q, i);
653 else
654 pa_sink_input_unref(i);
655 }
656
657 return q;
658 }
659
660 /* Called from main context */
661 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
662 pa_sink_input *i;
663
664 pa_sink_assert_ref(s);
665 pa_assert_ctl_context();
666 pa_assert(PA_SINK_IS_LINKED(s->state));
667 pa_assert(q);
668
669 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
670 if (pa_sink_input_finish_move(i, s, save) < 0)
671 pa_sink_input_fail_move(i);
672
673 pa_sink_input_unref(i);
674 }
675
676 pa_queue_free(q, NULL, NULL);
677 }
678
679 /* Called from main context */
680 void pa_sink_move_all_fail(pa_queue *q) {
681 pa_sink_input *i;
682
683 pa_assert_ctl_context();
684 pa_assert(q);
685
686 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
687 pa_sink_input_fail_move(i);
688 pa_sink_input_unref(i);
689 }
690
691 pa_queue_free(q, NULL, NULL);
692 }
693
/* Called from IO thread context */
/* Execute a previously requested rewind of 'nbytes' bytes: forward it to
 * every attached input and then to the monitor source.  The request flags
 * are cleared unconditionally.  nbytes == 0 serves to "complete" a request
 * that turned out to need no actual rewinding. */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* If nobody requested this and this is actually no real rewind
     * then we can short cut this. Please note that this means that
     * not all rewind requests triggered upstream will always be
     * translated in actual requests! */
    if (!s->thread_info.rewind_requested && nbytes <= 0)
        return;

    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;

    /* While suspended the request is simply swallowed */
    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return;

    if (nbytes > 0)
        pa_log_debug("Processing rewind...");

    /* Even a zero-byte rewind is forwarded to the inputs, so they can
     * clear their own pending-request state */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_process_rewind(i, nbytes);
    }

    if (nbytes > 0)
        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
            pa_source_process_rewind(s->monitor_source, nbytes);
}
728
/* Called from IO thread context */
/* Peek up to 'maxinfo' inputs and collect their next chunks into 'info'.
 * On entry *length is the number of bytes we would like to mix; on return
 * it is lowered to the shortest chunk obtained, so that all entries cover
 * the same duration.  Chunks that are pure silence are skipped (though
 * their length still constrains the mix length).  Every returned entry
 * holds a reference to its input in info->userdata and to its memblock;
 * inputs_drop() releases both.  Returns the number of entries filled. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        /* Track the shortest chunk seen so far (0 means "none yet") */
        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Silent chunks need not be mixed in; drop them early */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
768
/* Called from IO thread context */
/* Counterpart of fill_mix_info(): advance every input's read pointer by
 * result->length bytes, feed each input's (volume-adjusted) data to its
 * direct outputs, release the references held by the 'info' entries and
 * finally post the mixed result to the monitor source. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state = NULL;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry info the pa_mix_info array */
        /* 'p' persists across outer iterations, so when the input order is
         * unchanged the very first probe hits */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                /* Hand this input's own data to its direct outputs,
                 * applying the input's volume; inputs that contributed no
                 * chunk get silence instead */
                if (m && m->chunk.memblock) {
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release the references fill_mix_info() took for this entry */
        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
865
866 /* Called from IO thread context */
867 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
868 pa_mix_info info[MAX_MIX_CHANNELS];
869 unsigned n;
870 size_t block_size_max;
871
872 pa_sink_assert_ref(s);
873 pa_sink_assert_io_context(s);
874 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
875 pa_assert(pa_frame_aligned(length, &s->sample_spec));
876 pa_assert(result);
877
878 pa_sink_ref(s);
879
880 pa_assert(!s->thread_info.rewind_requested);
881 pa_assert(s->thread_info.rewind_nbytes == 0);
882
883 if (s->thread_info.state == PA_SINK_SUSPENDED) {
884 result->memblock = pa_memblock_ref(s->silence.memblock);
885 result->index = s->silence.index;
886 result->length = PA_MIN(s->silence.length, length);
887 return;
888 }
889
890 if (length <= 0)
891 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
892
893 block_size_max = pa_mempool_block_size_max(s->core->mempool);
894 if (length > block_size_max)
895 length = pa_frame_align(block_size_max, &s->sample_spec);
896
897 pa_assert(length > 0);
898
899 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
900
901 if (n == 0) {
902
903 *result = s->silence;
904 pa_memblock_ref(result->memblock);
905
906 if (result->length > length)
907 result->length = length;
908
909 } else if (n == 1) {
910 pa_cvolume volume;
911
912 *result = info[0].chunk;
913 pa_memblock_ref(result->memblock);
914
915 if (result->length > length)
916 result->length = length;
917
918 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
919
920 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
921 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
922 pa_memblock_unref(result->memblock);
923 pa_silence_memchunk_get(&s->core->silence_cache,
924 s->core->mempool,
925 result,
926 &s->sample_spec,
927 result->length);
928 } else {
929 pa_memchunk_make_writable(result, 0);
930 pa_volume_memchunk(result, &s->sample_spec, &volume);
931 }
932 }
933 } else {
934 void *ptr;
935 result->memblock = pa_memblock_new(s->core->mempool, length);
936
937 ptr = pa_memblock_acquire(result->memblock);
938 result->length = pa_mix(info, n,
939 ptr, length,
940 &s->sample_spec,
941 &s->thread_info.soft_volume,
942 s->thread_info.soft_muted);
943 pa_memblock_release(result->memblock);
944
945 result->index = 0;
946 }
947
948 inputs_drop(s, info, n, result);
949
950 pa_sink_unref(s);
951 }
952
953 /* Called from IO thread context */
954 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
955 pa_mix_info info[MAX_MIX_CHANNELS];
956 unsigned n;
957 size_t length, block_size_max;
958
959 pa_sink_assert_ref(s);
960 pa_sink_assert_io_context(s);
961 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
962 pa_assert(target);
963 pa_assert(target->memblock);
964 pa_assert(target->length > 0);
965 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
966
967 pa_sink_ref(s);
968
969 pa_assert(!s->thread_info.rewind_requested);
970 pa_assert(s->thread_info.rewind_nbytes == 0);
971
972 if (s->thread_info.state == PA_SINK_SUSPENDED) {
973 pa_silence_memchunk(target, &s->sample_spec);
974 return;
975 }
976
977 length = target->length;
978 block_size_max = pa_mempool_block_size_max(s->core->mempool);
979 if (length > block_size_max)
980 length = pa_frame_align(block_size_max, &s->sample_spec);
981
982 pa_assert(length > 0);
983
984 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
985
986 if (n == 0) {
987 if (target->length > length)
988 target->length = length;
989
990 pa_silence_memchunk(target, &s->sample_spec);
991 } else if (n == 1) {
992 pa_cvolume volume;
993
994 if (target->length > length)
995 target->length = length;
996
997 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
998
999 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1000 pa_silence_memchunk(target, &s->sample_spec);
1001 else {
1002 pa_memchunk vchunk;
1003
1004 vchunk = info[0].chunk;
1005 pa_memblock_ref(vchunk.memblock);
1006
1007 if (vchunk.length > length)
1008 vchunk.length = length;
1009
1010 if (!pa_cvolume_is_norm(&volume)) {
1011 pa_memchunk_make_writable(&vchunk, 0);
1012 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1013 }
1014
1015 pa_memchunk_memcpy(target, &vchunk);
1016 pa_memblock_unref(vchunk.memblock);
1017 }
1018
1019 } else {
1020 void *ptr;
1021
1022 ptr = pa_memblock_acquire(target->memblock);
1023
1024 target->length = pa_mix(info, n,
1025 (uint8_t*) ptr + target->index, length,
1026 &s->sample_spec,
1027 &s->thread_info.soft_volume,
1028 s->thread_info.soft_muted);
1029
1030 pa_memblock_release(target->memblock);
1031 }
1032
1033 inputs_drop(s, info, n, target);
1034
1035 pa_sink_unref(s);
1036 }
1037
1038 /* Called from IO thread context */
1039 void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
1040 pa_memchunk chunk;
1041 size_t l, d;
1042
1043 pa_sink_assert_ref(s);
1044 pa_sink_assert_io_context(s);
1045 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1046 pa_assert(target);
1047 pa_assert(target->memblock);
1048 pa_assert(target->length > 0);
1049 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
1050
1051 pa_sink_ref(s);
1052
1053 pa_assert(!s->thread_info.rewind_requested);
1054 pa_assert(s->thread_info.rewind_nbytes == 0);
1055
1056 l = target->length;
1057 d = 0;
1058 while (l > 0) {
1059 chunk = *target;
1060 chunk.index += d;
1061 chunk.length -= d;
1062
1063 pa_sink_render_into(s, &chunk);
1064
1065 d += chunk.length;
1066 l -= chunk.length;
1067 }
1068
1069 pa_sink_unref(s);
1070 }
1071
/* Called from IO thread context */
/* Render exactly 'length' bytes into *result: first mix what is available
 * (length1st may come out shorter), then top up the remainder with
 * additional pa_sink_render_into() passes so the caller always gets a full
 * chunk. */
void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    size_t length1st = length;
    unsigned n;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(length > 0);
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    pa_assert(length > 0);

    /* First pass: mix whatever is immediately available; length1st is
     * reduced to the amount actually obtainable */
    n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        pa_silence_memchunk_get(&s->core->silence_cache,
                                s->core->mempool,
                                result,
                                &s->sample_spec,
                                length1st);
    } else if (n == 1) {
        pa_cvolume volume;

        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        /* NOTE(review): this clamps against 'length' (the full request)
         * rather than 'length1st' — presumably fine since the tail is
         * filled below, but verify */
        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
            if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
                pa_memblock_unref(result->memblock);
                pa_silence_memchunk_get(&s->core->silence_cache,
                                        s->core->mempool,
                                        result,
                                        &s->sample_spec,
                                        result->length);
            } else {
                pa_memchunk_make_writable(result, length);
                pa_volume_memchunk(result, &s->sample_spec, &volume);
            }
        }
    } else {
        void *ptr;

        result->index = 0;
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);

        result->length = pa_mix(info, n,
                                (uint8_t*) ptr + result->index, length1st,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(result->memblock);
    }

    inputs_drop(s, info, n, result);

    /* Second phase: if the first mix produced less than requested, render
     * the remaining tail in-place until the chunk is complete */
    if (result->length < length) {
        pa_memchunk chunk;
        size_t l, d;
        pa_memchunk_make_writable(result, length);

        l = length - result->length;
        d = result->index + result->length;
        while (l > 0) {
            chunk = *result;
            chunk.index = d;
            chunk.length = l;

            pa_sink_render_into(s, &chunk);

            d += chunk.length;
            l -= chunk.length;
        }
        result->length = length;
    }

    pa_sink_unref(s);
}
1165
1166 /* Called from main thread */
1167 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1168 pa_usec_t usec = 0;
1169
1170 pa_sink_assert_ref(s);
1171 pa_assert_ctl_context();
1172 pa_assert(PA_SINK_IS_LINKED(s->state));
1173
1174 /* The returned value is supposed to be in the time domain of the sound card! */
1175
1176 if (s->state == PA_SINK_SUSPENDED)
1177 return 0;
1178
1179 if (!(s->flags & PA_SINK_LATENCY))
1180 return 0;
1181
1182 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1183
1184 return usec;
1185 }
1186
1187 /* Called from IO thread */
1188 pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
1189 pa_usec_t usec = 0;
1190 pa_msgobject *o;
1191
1192 pa_sink_assert_ref(s);
1193 pa_sink_assert_io_context(s);
1194 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
1195
1196 /* The returned value is supposed to be in the time domain of the sound card! */
1197
1198 if (s->thread_info.state == PA_SINK_SUSPENDED)
1199 return 0;
1200
1201 if (!(s->flags & PA_SINK_LATENCY))
1202 return 0;
1203
1204 o = PA_MSGOBJECT(s);
1205
1206 /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
1207
1208 if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
1209 return -1;
1210
1211 return usec;
1212 }
1213
/* Called from main thread (both callers assert the control context).
 * Recomputes a sink input's soft volume from its virtual volume and
 * the prospective new sink volume, caching the per-channel ratio in
 * i->relative_volume. new_volume must already be remapped to the
 * input's channel map. */
static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
    unsigned c;

    pa_sink_input_assert_ref(i);
    pa_assert(new_volume->channels == i->sample_spec.channels);

    /*
     * This basically calculates:
     *
     * i->relative_volume := i->virtual_volume / new_volume
     * i->soft_volume := i->relative_volume * i->volume_factor
     */

    /* The new sink volume passed in here must already be remapped to
     * the sink input's channel map! */

    i->soft_volume.channels = i->sample_spec.channels;

    for (c = 0; c < i->sample_spec.channels; c++)

        if (new_volume->values[c] <= PA_VOLUME_MUTED)
            /* Sink channel is muted: dividing by its (zero) linear
             * volume below would be meaningless, so just mute the
             * input channel. We leave i->relative_volume untouched */
            i->soft_volume.values[c] = PA_VOLUME_MUTED;
        else {
            i->relative_volume[c] =
                pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
                pa_sw_volume_to_linear(new_volume->values[c]);

            i->soft_volume.values[c] = pa_sw_volume_from_linear(
                    i->relative_volume[c] *
                    pa_sw_volume_to_linear(i->volume_factor.values[c]));
        }

    /* Hooks have the ability to play games with i->soft_volume */
    pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);

    /* We don't copy the soft_volume to the thread_info data
     * here. That must be done by the caller */
}
1253
1254 /* Called from main thread */
1255 void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
1256 pa_sink_input *i;
1257 uint32_t idx;
1258
1259 pa_sink_assert_ref(s);
1260 pa_assert_ctl_context();
1261 pa_assert(new_volume);
1262 pa_assert(PA_SINK_IS_LINKED(s->state));
1263 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1264
1265 /* This is called whenever a sink input volume changes or a sink
1266 * input is added/removed and we might need to fix up the sink
1267 * volume accordingly. Please note that we don't actually update
1268 * the sinks volume here, we only return how it needs to be
1269 * updated. The caller should then call pa_sink_set_volume().*/
1270
1271 if (pa_idxset_isempty(s->inputs)) {
1272 /* In the special case that we have no sink input we leave the
1273 * volume unmodified. */
1274 *new_volume = s->reference_volume;
1275 return;
1276 }
1277
1278 pa_cvolume_mute(new_volume, s->channel_map.channels);
1279
1280 /* First let's determine the new maximum volume of all inputs
1281 * connected to this sink */
1282 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1283 unsigned c;
1284 pa_cvolume remapped_volume;
1285
1286 remapped_volume = i->virtual_volume;
1287 pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);
1288
1289 for (c = 0; c < new_volume->channels; c++)
1290 if (remapped_volume.values[c] > new_volume->values[c])
1291 new_volume->values[c] = remapped_volume.values[c];
1292 }
1293
1294 /* Then, let's update the soft volumes of all inputs connected
1295 * to this sink */
1296 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1297 pa_cvolume remapped_new_volume;
1298
1299 remapped_new_volume = *new_volume;
1300 pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
1301 compute_new_soft_volume(i, &remapped_new_volume);
1302
1303 /* We don't copy soft_volume to the thread_info data here
1304 * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
1305 * want the update to be atomically with the sink volume
1306 * update, hence we do it within the pa_sink_set_volume() call
1307 * below */
1308 }
1309 }
1310
1311 /* Called from main thread */
1312 void pa_sink_propagate_flat_volume(pa_sink *s) {
1313 pa_sink_input *i;
1314 uint32_t idx;
1315
1316 pa_sink_assert_ref(s);
1317 pa_assert_ctl_context();
1318 pa_assert(PA_SINK_IS_LINKED(s->state));
1319 pa_assert(s->flags & PA_SINK_FLAT_VOLUME);
1320
1321 /* This is called whenever the sink volume changes that is not
1322 * caused by a sink input volume change. We need to fix up the
1323 * sink input volumes accordingly */
1324
1325 for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
1326 pa_cvolume sink_volume, new_virtual_volume;
1327 unsigned c;
1328
1329 /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */
1330
1331 sink_volume = s->virtual_volume;
1332 pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);
1333
1334 for (c = 0; c < i->sample_spec.channels; c++)
1335 new_virtual_volume.values[c] = pa_sw_volume_from_linear(
1336 i->relative_volume[c] *
1337 pa_sw_volume_to_linear(sink_volume.values[c]));
1338
1339 new_virtual_volume.channels = i->sample_spec.channels;
1340
1341 if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
1342 i->virtual_volume = new_virtual_volume;
1343
1344 /* Hmm, the soft volume might no longer actually match
1345 * what has been chosen as new virtual volume here,
1346 * especially when the old volume was
1347 * PA_VOLUME_MUTED. Hence let's recalculate the soft
1348 * volumes here. */
1349 compute_new_soft_volume(i, &sink_volume);
1350
1351 /* The virtual volume changed, let's tell people so */
1352 pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
1353 }
1354 }
1355
1356 /* If the soft_volume of any of the sink inputs got changed, let's
1357 * make sure the thread copies are synced up. */
1358 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
1359 }
1360
/* Called from main thread */
/* Sets the sink's virtual volume.
 *
 *  volume           - new virtual volume; must be valid and compatible
 *                     with the sink's sample spec
 *  propagate        - in flat volume mode, push the change down to the
 *                     sink inputs (pa_sink_propagate_flat_volume())
 *  sendmsg          - tell the IO thread about the new soft/virtual volume
 *  become_reference - also make this the sink's reference volume
 *  save             - mark the volume as worth saving persistently
 */
void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference, pa_bool_t save) {
    pa_bool_t virtual_volume_changed;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(volume);
    pa_assert(pa_cvolume_valid(volume));
    pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));

    virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
    s->virtual_volume = *volume;
    /* An unchanged volume keeps its old save flag; a real change
     * takes the caller's wish */
    s->save_volume = (!virtual_volume_changed && s->save_volume) || save;

    if (become_reference)
        s->reference_volume = s->virtual_volume;

    /* Propagate this volume change back to the inputs */
    if (virtual_volume_changed)
        if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
            pa_sink_propagate_flat_volume(s);

    if (s->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to s->soft_volume */

        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
        s->set_volume(s);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the virtual volume */
        s->soft_volume = s->virtual_volume;

    /* This tells the sink that soft and/or virtual volume changed */
    if (sendmsg)
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);

    if (virtual_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1404
1405 /* Called from main thread. Only to be called by sink implementor */
1406 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1407 pa_sink_assert_ref(s);
1408 pa_assert_ctl_context();
1409 pa_assert(volume);
1410
1411 s->soft_volume = *volume;
1412
1413 if (PA_SINK_IS_LINKED(s->state))
1414 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1415 else
1416 s->thread_info.soft_volume = *volume;
1417 }
1418
/* Called from main thread */
/* Returns the sink's reference or virtual volume. If the sink asked
 * for refreshes (refresh_volume) or force_refresh is set, the
 * hardware volume is re-read first via get_volume() plus a blocking
 * GET_VOLUME message; a detected hardware change updates the
 * reference volume, is marked for saving, propagated in flat volume
 * mode and announced via a subscription event. */
const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_virtual_volume = s->virtual_volume;

        if (s->get_volume)
            s->get_volume(s);

        /* Give the IO thread a chance to fill in the volume, too */
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {

            s->reference_volume = s->virtual_volume;

            /* Something got changed in the hardware. It probably
             * makes sense to save changed hw settings given that hw
             * volume changes not triggered by PA are almost certainly
             * done by the user. */
            s->save_volume = TRUE;

            if (s->flags & PA_SINK_FLAT_VOLUME)
                pa_sink_propagate_flat_volume(s);

            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
        }
    }

    return reference ? &s->reference_volume : &s->virtual_volume;
}
1452
1453 /* Called from main thread */
1454 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume) {
1455 pa_sink_assert_ref(s);
1456 pa_assert_ctl_context();
1457 pa_assert(PA_SINK_IS_LINKED(s->state));
1458
1459 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1460 if (pa_cvolume_equal(&s->virtual_volume, new_volume))
1461 return;
1462
1463 s->reference_volume = s->virtual_volume = *new_volume;
1464 s->save_volume = TRUE;
1465
1466 if (s->flags & PA_SINK_FLAT_VOLUME)
1467 pa_sink_propagate_flat_volume(s);
1468
1469 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1470 }
1471
1472 /* Called from main thread */
1473 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1474 pa_bool_t old_muted;
1475
1476 pa_sink_assert_ref(s);
1477 pa_assert_ctl_context();
1478 pa_assert(PA_SINK_IS_LINKED(s->state));
1479
1480 old_muted = s->muted;
1481 s->muted = mute;
1482 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1483
1484 if (s->set_mute)
1485 s->set_mute(s);
1486
1487 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1488
1489 if (old_muted != s->muted)
1490 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1491 }
1492
1493 /* Called from main thread */
1494 pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
1495
1496 pa_sink_assert_ref(s);
1497 pa_assert_ctl_context();
1498 pa_assert(PA_SINK_IS_LINKED(s->state));
1499
1500 if (s->refresh_muted || force_refresh) {
1501 pa_bool_t old_muted = s->muted;
1502
1503 if (s->get_mute)
1504 s->get_mute(s);
1505
1506 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
1507
1508 if (old_muted != s->muted) {
1509 s->save_muted = TRUE;
1510
1511 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1512
1513 /* Make sure the soft mute status stays in sync */
1514 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1515 }
1516 }
1517
1518
1519 return s->muted;
1520 }
1521
1522 /* Called from main thread */
1523 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1524 pa_sink_assert_ref(s);
1525 pa_assert_ctl_context();
1526 pa_assert(PA_SINK_IS_LINKED(s->state));
1527
1528 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1529
1530 if (s->muted == new_muted)
1531 return;
1532
1533 s->muted = new_muted;
1534 s->save_muted = TRUE;
1535
1536 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1537 }
1538
1539 /* Called from main thread */
1540 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1541 pa_sink_assert_ref(s);
1542 pa_assert_ctl_context();
1543
1544 if (p)
1545 pa_proplist_update(s->proplist, mode, p);
1546
1547 if (PA_SINK_IS_LINKED(s->state)) {
1548 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1549 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1550 }
1551
1552 return TRUE;
1553 }
1554
1555 /* Called from main thread */
1556 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1557 void pa_sink_set_description(pa_sink *s, const char *description) {
1558 const char *old;
1559 pa_sink_assert_ref(s);
1560 pa_assert_ctl_context();
1561
1562 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1563 return;
1564
1565 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1566
1567 if (old && description && pa_streq(old, description))
1568 return;
1569
1570 if (description)
1571 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1572 else
1573 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1574
1575 if (s->monitor_source) {
1576 char *n;
1577
1578 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1579 pa_source_set_description(s->monitor_source, n);
1580 pa_xfree(n);
1581 }
1582
1583 if (PA_SINK_IS_LINKED(s->state)) {
1584 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1585 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1586 }
1587 }
1588
1589 /* Called from main thread */
1590 unsigned pa_sink_linked_by(pa_sink *s) {
1591 unsigned ret;
1592
1593 pa_sink_assert_ref(s);
1594 pa_assert_ctl_context();
1595 pa_assert(PA_SINK_IS_LINKED(s->state));
1596
1597 ret = pa_idxset_size(s->inputs);
1598
1599 /* We add in the number of streams connected to us here. Please
1600 * note the asymmmetry to pa_sink_used_by()! */
1601
1602 if (s->monitor_source)
1603 ret += pa_source_linked_by(s->monitor_source);
1604
1605 return ret;
1606 }
1607
1608 /* Called from main thread */
1609 unsigned pa_sink_used_by(pa_sink *s) {
1610 unsigned ret;
1611
1612 pa_sink_assert_ref(s);
1613 pa_assert_ctl_context();
1614 pa_assert(PA_SINK_IS_LINKED(s->state));
1615
1616 ret = pa_idxset_size(s->inputs);
1617 pa_assert(ret >= s->n_corked);
1618
1619 /* Streams connected to our monitor source do not matter for
1620 * pa_sink_used_by()!.*/
1621
1622 return ret - s->n_corked;
1623 }
1624
1625 /* Called from main thread */
1626 unsigned pa_sink_check_suspend(pa_sink *s) {
1627 unsigned ret;
1628 pa_sink_input *i;
1629 uint32_t idx;
1630
1631 pa_sink_assert_ref(s);
1632 pa_assert_ctl_context();
1633
1634 if (!PA_SINK_IS_LINKED(s->state))
1635 return 0;
1636
1637 ret = 0;
1638
1639 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1640 pa_sink_input_state_t st;
1641
1642 st = pa_sink_input_get_state(i);
1643 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1644
1645 if (st == PA_SINK_INPUT_CORKED)
1646 continue;
1647
1648 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1649 continue;
1650
1651 ret ++;
1652 }
1653
1654 if (s->monitor_source)
1655 ret += pa_source_check_suspend(s->monitor_source);
1656
1657 return ret;
1658 }
1659
1660 /* Called from the IO thread */
1661 static void sync_input_volumes_within_thread(pa_sink *s) {
1662 pa_sink_input *i;
1663 void *state = NULL;
1664
1665 pa_sink_assert_ref(s);
1666 pa_sink_assert_io_context(s);
1667
1668 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1669 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1670 continue;
1671
1672 i->thread_info.soft_volume = i->soft_volume;
1673 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1674 }
1675 }
1676
/* Called from IO thread, except when it is not */
/* Central message handler of the sink: stream add/remove/move,
 * volume/mute synchronization between main and IO thread, state
 * changes and latency queries. Returns 0 on success; -1 for messages
 * not handled here (e.g. PA_SINK_MESSAGE_GET_LATENCY, which the
 * concrete sink implementation is expected to answer itself). */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Mirror the main-thread sync_prev/sync_next links into
             * the thread_info shadow copies */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be
             * fixed up and then configured on the sink */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming. */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unhook this input from its (now stale) thread-side sync
             * chain */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                if (total_nbytes > 0) {
                    /* Make the input re-render everything that is
                     * still queued in the old sink so it can be
                     * replayed on the new one */
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                /* Skip ahead over the data that is already queued in
                 * the new sink's buffer, then rewind so playback
                 * restarts seamlessly */
                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_VOLUME:

            /* Pull the main-thread soft volume into the IO thread
             * and rewind so it takes effect retroactively */
            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (!(s->flags & PA_SINK_FLAT_VOLUME))
                return 0;

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:
            /* Nothing to do here by default; implementations may
             * intercept this to fill in hardware values */
            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            /* Pull the main-thread mute flag into the IO thread */
            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:
            /* See PA_SINK_MESSAGE_GET_VOLUME */
            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* Did we cross the suspended/opened boundary in either
             * direction? */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            /* A suspended sink has no business with pending rewinds */
            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                /* Let every input know about the suspend transition */
                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            /* Not handled here: falls through to the -1 below */
            ;
    }

    return -1;
}
2003
2004 /* Called from main thread */
2005 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2006 pa_sink *sink;
2007 uint32_t idx;
2008 int ret = 0;
2009
2010 pa_core_assert_ref(c);
2011 pa_assert_ctl_context();
2012 pa_assert(cause != 0);
2013
2014 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2015 int r;
2016
2017 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2018 ret = r;
2019 }
2020
2021 return ret;
2022 }
2023
/* Called from main thread */
/* Asks the IO thread to detach all streams from their sink; blocks
 * until the DETACH message has been processed. */
void pa_sink_detach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
2032
/* Called from main thread */
/* Asks the IO thread to reattach all streams to their sink; blocks
 * until the ATTACH message has been processed. */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2041
2042 /* Called from IO thread */
2043 void pa_sink_detach_within_thread(pa_sink *s) {
2044 pa_sink_input *i;
2045 void *state = NULL;
2046
2047 pa_sink_assert_ref(s);
2048 pa_sink_assert_io_context(s);
2049 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2050
2051 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2052 if (i->detach)
2053 i->detach(i);
2054
2055 if (s->monitor_source)
2056 pa_source_detach_within_thread(s->monitor_source);
2057 }
2058
2059 /* Called from IO thread */
2060 void pa_sink_attach_within_thread(pa_sink *s) {
2061 pa_sink_input *i;
2062 void *state = NULL;
2063
2064 pa_sink_assert_ref(s);
2065 pa_sink_assert_io_context(s);
2066 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2067
2068 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2069 if (i->attach)
2070 i->attach(i);
2071
2072 if (s->monitor_source)
2073 pa_source_attach_within_thread(s->monitor_source);
2074 }
2075
2076 /* Called from IO thread */
2077 void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
2078 pa_sink_assert_ref(s);
2079 pa_sink_assert_io_context(s);
2080 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2081
2082 if (s->thread_info.state == PA_SINK_SUSPENDED)
2083 return;
2084
2085 if (nbytes == (size_t) -1)
2086 nbytes = s->thread_info.max_rewind;
2087
2088 nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
2089
2090 if (s->thread_info.rewind_requested &&
2091 nbytes <= s->thread_info.rewind_nbytes)
2092 return;
2093
2094 s->thread_info.rewind_nbytes = nbytes;
2095 s->thread_info.rewind_requested = TRUE;
2096
2097 if (s->request_rewind)
2098 s->request_rewind(s);
2099 }
2100
/* Called from IO thread */
/* Computes the latency the sink should run at: the minimum of all
 * inputs' and the monitor source's requested latencies, clamped to
 * [min_latency, max_latency]. Returns (pa_usec_t) -1 when nobody
 * requested anything. Sinks without PA_SINK_DYNAMIC_LATENCY always
 * report their (clamped) fixed latency. The result is cached in
 * thread_info until pa_sink_invalidate_requested_latency(). */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    /* Use the cached value if it is still valid */
    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Take the smallest latency any input asked for */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* The monitor source's streams have a say, too */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
2139
2140 /* Called from main thread */
2141 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2142 pa_usec_t usec = 0;
2143
2144 pa_sink_assert_ref(s);
2145 pa_assert_ctl_context();
2146 pa_assert(PA_SINK_IS_LINKED(s->state));
2147
2148 if (s->state == PA_SINK_SUSPENDED)
2149 return 0;
2150
2151 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2152 return usec;
2153 }
2154
2155 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2156 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2157 pa_sink_input *i;
2158 void *state = NULL;
2159
2160 pa_sink_assert_ref(s);
2161 pa_sink_assert_io_context(s);
2162
2163 if (max_rewind == s->thread_info.max_rewind)
2164 return;
2165
2166 s->thread_info.max_rewind = max_rewind;
2167
2168 if (PA_SINK_IS_LINKED(s->thread_info.state))
2169 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2170 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2171
2172 if (s->monitor_source)
2173 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2174 }
2175
2176 /* Called from main thread */
2177 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2178 pa_sink_assert_ref(s);
2179 pa_assert_ctl_context();
2180
2181 if (PA_SINK_IS_LINKED(s->state))
2182 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2183 else
2184 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2185 }
2186
2187 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2188 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2189 void *state = NULL;
2190
2191 pa_sink_assert_ref(s);
2192 pa_sink_assert_io_context(s);
2193
2194 if (max_request == s->thread_info.max_request)
2195 return;
2196
2197 s->thread_info.max_request = max_request;
2198
2199 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2200 pa_sink_input *i;
2201
2202 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2203 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2204 }
2205 }
2206
2207 /* Called from main thread */
2208 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2209 pa_sink_assert_ref(s);
2210 pa_assert_ctl_context();
2211
2212 if (PA_SINK_IS_LINKED(s->state))
2213 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2214 else
2215 pa_sink_set_max_request_within_thread(s, max_request);
2216 }
2217
/* Called from IO thread */

/* Drops the cached requested-latency value so that it is recomputed on
 * the next call to pa_sink_get_requested_latency_within_thread(), and
 * notifies both the sink implementation and all connected inputs.
 *
 * 'dynamic' is TRUE when the invalidation is triggered by a change in
 * some stream's dynamically requested latency. Sinks that lack
 * PA_SINK_DYNAMIC_LATENCY ignore such requests entirely, hence for
 * them a dynamic-only change requires no notification at all. */
void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Only dynamic-latency sinks actually cache a requested latency */
    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = FALSE;
    else if (dynamic)
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        /* Let the implementation react... */
        if (s->update_requested_latency)
            s->update_requested_latency(s);

        /* ...and every connected input as well */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
2241
2242 /* Called from main thread */
2243 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2244 pa_sink_assert_ref(s);
2245 pa_assert_ctl_context();
2246
2247 /* min_latency == 0: no limit
2248 * min_latency anything else: specified limit
2249 *
2250 * Similar for max_latency */
2251
2252 if (min_latency < ABSOLUTE_MIN_LATENCY)
2253 min_latency = ABSOLUTE_MIN_LATENCY;
2254
2255 if (max_latency <= 0 ||
2256 max_latency > ABSOLUTE_MAX_LATENCY)
2257 max_latency = ABSOLUTE_MAX_LATENCY;
2258
2259 pa_assert(min_latency <= max_latency);
2260
2261 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2262 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2263 max_latency == ABSOLUTE_MAX_LATENCY) ||
2264 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2265
2266 if (PA_SINK_IS_LINKED(s->state)) {
2267 pa_usec_t r[2];
2268
2269 r[0] = min_latency;
2270 r[1] = max_latency;
2271
2272 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2273 } else
2274 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2275 }
2276
2277 /* Called from main thread */
2278 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2279 pa_sink_assert_ref(s);
2280 pa_assert_ctl_context();
2281 pa_assert(min_latency);
2282 pa_assert(max_latency);
2283
2284 if (PA_SINK_IS_LINKED(s->state)) {
2285 pa_usec_t r[2] = { 0, 0 };
2286
2287 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2288
2289 *min_latency = r[0];
2290 *max_latency = r[1];
2291 } else {
2292 *min_latency = s->thread_info.min_latency;
2293 *max_latency = s->thread_info.max_latency;
2294 }
2295 }
2296
2297 /* Called from IO thread */
2298 void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2299 pa_sink_assert_ref(s);
2300 pa_sink_assert_io_context(s);
2301
2302 pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
2303 pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
2304 pa_assert(min_latency <= max_latency);
2305
2306 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2307 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2308 max_latency == ABSOLUTE_MAX_LATENCY) ||
2309 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2310
2311 if (s->thread_info.min_latency == min_latency &&
2312 s->thread_info.max_latency == max_latency)
2313 return;
2314
2315 s->thread_info.min_latency = min_latency;
2316 s->thread_info.max_latency = max_latency;
2317
2318 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2319 pa_sink_input *i;
2320 void *state = NULL;
2321
2322 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2323 if (i->update_sink_latency_range)
2324 i->update_sink_latency_range(i);
2325 }
2326
2327 pa_sink_invalidate_requested_latency(s, FALSE);
2328
2329 pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
2330 }
2331
2332 /* Called from main thread */
2333 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2334 pa_sink_assert_ref(s);
2335 pa_assert_ctl_context();
2336
2337 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2338 pa_assert(latency == 0);
2339 return;
2340 }
2341
2342 if (latency < ABSOLUTE_MIN_LATENCY)
2343 latency = ABSOLUTE_MIN_LATENCY;
2344
2345 if (latency > ABSOLUTE_MAX_LATENCY)
2346 latency = ABSOLUTE_MAX_LATENCY;
2347
2348 if (PA_SINK_IS_LINKED(s->state))
2349 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2350 else
2351 s->thread_info.fixed_latency = latency;
2352
2353 pa_source_set_fixed_latency(s->monitor_source, latency);
2354 }
2355
2356 /* Called from main thread */
2357 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2358 pa_usec_t latency;
2359
2360 pa_sink_assert_ref(s);
2361 pa_assert_ctl_context();
2362
2363 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2364 return 0;
2365
2366 if (PA_SINK_IS_LINKED(s->state))
2367 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2368 else
2369 latency = s->thread_info.fixed_latency;
2370
2371 return latency;
2372 }
2373
2374 /* Called from IO thread */
2375 void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
2376 pa_sink_assert_ref(s);
2377 pa_sink_assert_io_context(s);
2378
2379 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2380 pa_assert(latency == 0);
2381 return;
2382 }
2383
2384 pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
2385 pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
2386
2387 if (s->thread_info.fixed_latency == latency)
2388 return;
2389
2390 s->thread_info.fixed_latency = latency;
2391
2392 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2393 pa_sink_input *i;
2394 void *state = NULL;
2395
2396 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2397 if (i->update_sink_fixed_latency)
2398 i->update_sink_fixed_latency(i);
2399 }
2400
2401 pa_sink_invalidate_requested_latency(s, FALSE);
2402
2403 pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
2404 }
2405
2406 /* Called from main context */
2407 size_t pa_sink_get_max_rewind(pa_sink *s) {
2408 size_t r;
2409 pa_sink_assert_ref(s);
2410 pa_assert_ctl_context();
2411
2412 if (!PA_SINK_IS_LINKED(s->state))
2413 return s->thread_info.max_rewind;
2414
2415 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2416
2417 return r;
2418 }
2419
2420 /* Called from main context */
2421 size_t pa_sink_get_max_request(pa_sink *s) {
2422 size_t r;
2423 pa_sink_assert_ref(s);
2424 pa_assert_ctl_context();
2425
2426 if (!PA_SINK_IS_LINKED(s->state))
2427 return s->thread_info.max_request;
2428
2429 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2430
2431 return r;
2432 }
2433
2434 /* Called from main context */
2435 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2436 pa_device_port *port;
2437
2438 pa_sink_assert_ref(s);
2439 pa_assert_ctl_context();
2440
2441 if (!s->set_port) {
2442 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2443 return -PA_ERR_NOTIMPLEMENTED;
2444 }
2445
2446 if (!s->ports)
2447 return -PA_ERR_NOENTITY;
2448
2449 if (!(port = pa_hashmap_get(s->ports, name)))
2450 return -PA_ERR_NOENTITY;
2451
2452 if (s->active_port == port) {
2453 s->save_port = s->save_port || save;
2454 return 0;
2455 }
2456
2457 if ((s->set_port(s, port)) < 0)
2458 return -PA_ERR_NOENTITY;
2459
2460 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2461
2462 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2463
2464 s->active_port = port;
2465 s->save_port = save;
2466
2467 return 0;
2468 }
2469
2470 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2471 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2472
2473 pa_assert(p);
2474
2475 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2476 return TRUE;
2477
2478 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2479
2480 if (pa_streq(ff, "microphone"))
2481 t = "audio-input-microphone";
2482 else if (pa_streq(ff, "webcam"))
2483 t = "camera-web";
2484 else if (pa_streq(ff, "computer"))
2485 t = "computer";
2486 else if (pa_streq(ff, "handset"))
2487 t = "phone";
2488 else if (pa_streq(ff, "portable"))
2489 t = "multimedia-player";
2490 else if (pa_streq(ff, "tv"))
2491 t = "video-display";
2492
2493 /*
2494 * The following icons are not part of the icon naming spec,
2495 * because Rodney Dawes sucks as the maintainer of that spec.
2496 *
2497 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2498 */
2499 else if (pa_streq(ff, "headset"))
2500 t = "audio-headset";
2501 else if (pa_streq(ff, "headphone"))
2502 t = "audio-headphones";
2503 else if (pa_streq(ff, "speaker"))
2504 t = "audio-speakers";
2505 else if (pa_streq(ff, "hands-free"))
2506 t = "audio-handsfree";
2507 }
2508
2509 if (!t)
2510 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2511 if (pa_streq(c, "modem"))
2512 t = "modem";
2513
2514 if (!t) {
2515 if (is_sink)
2516 t = "audio-card";
2517 else
2518 t = "audio-input-microphone";
2519 }
2520
2521 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2522 if (strstr(profile, "analog"))
2523 s = "-analog";
2524 else if (strstr(profile, "iec958"))
2525 s = "-iec958";
2526 else if (strstr(profile, "hdmi"))
2527 s = "-hdmi";
2528 }
2529
2530 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2531
2532 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2533
2534 return TRUE;
2535 }
2536
2537 pa_bool_t pa_device_init_description(pa_proplist *p) {
2538 const char *s, *d = NULL, *k;
2539 pa_assert(p);
2540
2541 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2542 return TRUE;
2543
2544 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2545 if (pa_streq(s, "internal"))
2546 d = _("Internal Audio");
2547
2548 if (!d)
2549 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2550 if (pa_streq(s, "modem"))
2551 d = _("Modem");
2552
2553 if (!d)
2554 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2555
2556 if (!d)
2557 return FALSE;
2558
2559 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2560
2561 if (d && k)
2562 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2563 else if (d)
2564 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2565
2566 return TRUE;
2567 }
2568
2569 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2570 const char *s;
2571 pa_assert(p);
2572
2573 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2574 return TRUE;
2575
2576 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2577 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2578 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2579 return TRUE;
2580 }
2581
2582 return FALSE;
2583 }