]> code.delx.au - pulseaudio/blob - src/pulsecore/sink.c
core: add to FIXMEs
[pulseaudio] / src / pulsecore / sink.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2006 Lennart Poettering
5 Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
6
7 PulseAudio is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Lesser General Public License as published
9 by the Free Software Foundation; either version 2.1 of the License,
10 or (at your option) any later version.
11
12 PulseAudio is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU Lesser General Public License
18 along with PulseAudio; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
20 USA.
21 ***/
22
23 #ifdef HAVE_CONFIG_H
24 #include <config.h>
25 #endif
26
27 #include <stdlib.h>
28 #include <string.h>
29 #include <stdio.h>
30
31 #include <pulse/introspect.h>
32 #include <pulse/utf8.h>
33 #include <pulse/xmalloc.h>
34 #include <pulse/timeval.h>
35 #include <pulse/util.h>
36 #include <pulse/i18n.h>
37
38 #include <pulsecore/sink-input.h>
39 #include <pulsecore/namereg.h>
40 #include <pulsecore/core-util.h>
41 #include <pulsecore/sample-util.h>
42 #include <pulsecore/core-subscribe.h>
43 #include <pulsecore/log.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/play-memblockq.h>
46
47 #include "sink.h"
48
49 #define MAX_MIX_CHANNELS 32
50 #define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
51 #define ABSOLUTE_MIN_LATENCY (500)
52 #define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
53 #define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
54
55 static PA_DEFINE_CHECK_TYPE(pa_sink, pa_msgobject);
56
57 static void sink_free(pa_object *s);
58
59 pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
60 pa_assert(data);
61
62 pa_zero(*data);
63 data->proplist = pa_proplist_new();
64
65 return data;
66 }
67
68 void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
69 pa_assert(data);
70
71 pa_xfree(data->name);
72 data->name = pa_xstrdup(name);
73 }
74
75 void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
76 pa_assert(data);
77
78 if ((data->sample_spec_is_set = !!spec))
79 data->sample_spec = *spec;
80 }
81
82 void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
83 pa_assert(data);
84
85 if ((data->channel_map_is_set = !!map))
86 data->channel_map = *map;
87 }
88
89 void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
90 pa_assert(data);
91
92 if ((data->volume_is_set = !!volume))
93 data->volume = *volume;
94 }
95
96 void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
97 pa_assert(data);
98
99 data->muted_is_set = TRUE;
100 data->muted = !!mute;
101 }
102
103 void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
104 pa_assert(data);
105
106 pa_xfree(data->active_port);
107 data->active_port = pa_xstrdup(port);
108 }
109
110 void pa_sink_new_data_done(pa_sink_new_data *data) {
111 pa_assert(data);
112
113 pa_proplist_free(data->proplist);
114
115 if (data->ports) {
116 pa_device_port *p;
117
118 while ((p = pa_hashmap_steal_first(data->ports)))
119 pa_device_port_free(p);
120
121 pa_hashmap_free(data->ports, NULL, NULL);
122 }
123
124 pa_xfree(data->name);
125 pa_xfree(data->active_port);
126 }
127
128 pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
129 pa_device_port *p;
130
131 pa_assert(name);
132
133 p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
134 p->name = pa_xstrdup(name);
135 p->description = pa_xstrdup(description);
136
137 p->priority = 0;
138
139 return p;
140 }
141
142 void pa_device_port_free(pa_device_port *p) {
143 pa_assert(p);
144
145 pa_xfree(p->name);
146 pa_xfree(p->description);
147 pa_xfree(p);
148 }
149
/* Called from main context */
/* Clear all driver-supplied callbacks. Used when initializing a fresh
 * sink and again on unlink, so no backend hook can fire on a sink that
 * is being torn down. */
static void reset_callbacks(pa_sink *s) {
    pa_assert(s);

    s->set_state = NULL;
    s->get_volume = NULL;
    s->set_volume = NULL;
    s->get_mute = NULL;
    s->set_mute = NULL;
    s->request_rewind = NULL;
    s->update_requested_latency = NULL;
    s->set_port = NULL;
}
163
/* Called from main context */
/* Allocate and initialize a new sink from the 'data' template.
 * Registers the name, fires the NEW and FIXATE hooks (which may modify
 * 'data'), fills in defaults for anything the caller left unset, and
 * creates the matching ".monitor" source. Returns NULL on failure.
 * Note: the sink is not usable until pa_sink_put() is called. */
pa_sink* pa_sink_new(
        pa_core *core,
        pa_sink_new_data *data,
        pa_sink_flags_t flags) {

    pa_sink *s;
    const char *name;
    char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
    pa_source_new_data source_data;
    const char *dn;
    char *pt;

    pa_assert(core);
    pa_assert(data);
    pa_assert(data->name);
    pa_assert_ctl_context();

    s = pa_msgobject_new(pa_sink);

    /* Reserve the name first; the registry may return a modified
     * (uniquified) name, which becomes the canonical one below. */
    if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
        pa_log_debug("Failed to register name %s.", data->name);
        pa_xfree(s);
        return NULL;
    }

    pa_sink_new_data_set_name(data, name);

    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    /* FIXME, need to free s here on failure */
    /* NOTE(review): the pa_return_null_if_fail paths below return
     * without freeing 's' or unregistering the name — known leak per
     * the FIXME above. */

    pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
    pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);

    pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));

    /* Fill in defaults for anything the caller did not set explicitly. */
    if (!data->channel_map_is_set)
        pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));

    pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
    pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);

    if (!data->volume_is_set)
        pa_cvolume_reset(&data->volume, data->sample_spec.channels);

    pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
    pa_return_null_if_fail(data->volume.channels == data->sample_spec.channels);

    if (!data->muted_is_set)
        data->muted = FALSE;

    /* Inherit properties from the owning card, if any. */
    if (data->card)
        pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);

    pa_device_init_description(data->proplist);
    pa_device_init_icon(data->proplist, TRUE);
    pa_device_init_intended_roles(data->proplist);

    /* Last chance for modules to adjust 'data' before it is frozen
     * into the sink. */
    if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
        pa_xfree(s);
        pa_namereg_unregister(core, name);
        return NULL;
    }

    s->parent.parent.free = sink_free;
    s->parent.process_msg = pa_sink_process_msg;

    s->core = core;
    s->state = PA_SINK_INIT;
    s->flags = flags;
    s->suspend_cause = 0;
    s->name = pa_xstrdup(name);
    s->proplist = pa_proplist_copy(data->proplist);
    s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
    s->module = data->module;
    s->card = data->card;

    s->sample_spec = data->sample_spec;
    s->channel_map = data->channel_map;

    s->inputs = pa_idxset_new(NULL, NULL);
    s->n_corked = 0;

    /* Software volume starts at norm; the configured volume becomes
     * both the reference and virtual volume. */
    s->reference_volume = s->virtual_volume = data->volume;
    pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
    s->base_volume = PA_VOLUME_NORM;
    s->n_volume_steps = PA_VOLUME_NORM+1;
    s->muted = data->muted;
    s->refresh_volume = s->refresh_muted = FALSE;

    reset_callbacks(s);
    s->userdata = NULL;

    s->asyncmsgq = NULL;

    /* As a minor optimization we just steal the list instead of
     * copying it here */
    s->ports = data->ports;
    data->ports = NULL;

    s->active_port = NULL;
    s->save_port = FALSE;

    /* Prefer the explicitly requested port; otherwise fall back to the
     * highest-priority port available. */
    if (data->active_port && s->ports)
        if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
            s->save_port = data->save_port;

    if (!s->active_port && s->ports) {
        void *state;
        pa_device_port *p;

        PA_HASHMAP_FOREACH(p, s->ports, state)
            if (!s->active_port || p->priority > s->active_port->priority)
                s->active_port = p;
    }

    s->save_volume = data->save_volume;
    s->save_muted = data->save_muted;

    /* Cache a silence block matching this sink's sample spec. */
    pa_silence_memchunk_get(
            &core->silence_cache,
            core->mempool,
            &s->silence,
            &s->sample_spec,
            0);

    /* Seed the IO-thread-side shadow state from the main-thread state. */
    s->thread_info.rtpoll = NULL;
    s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;
    s->thread_info.state = s->state;
    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;
    s->thread_info.max_rewind = 0;
    s->thread_info.max_request = 0;
    s->thread_info.requested_latency_valid = FALSE;
    s->thread_info.requested_latency = 0;
    s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
    s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
    s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;

    /* FIXME: This should probably be moved to pa_sink_put() */
    pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);

    if (s->card)
        pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);

    pt = pa_proplist_to_string_sep(s->proplist, "\n    ");
    pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n    %s",
                s->index,
                s->name,
                pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
                pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
                pt);
    pa_xfree(pt);

    /* Create the companion monitor source that taps this sink's output. */
    pa_source_new_data_init(&source_data);
    pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
    pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
    source_data.name = pa_sprintf_malloc("%s.monitor", name);
    source_data.driver = data->driver;
    source_data.module = data->module;
    source_data.card = data->card;

    dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
    pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
    pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");

    /* The monitor mirrors only the latency-related flags of the sink. */
    s->monitor_source = pa_source_new(core, &source_data,
                                      ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
                                      ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));

    pa_source_new_data_done(&source_data);

    if (!s->monitor_source) {
        pa_sink_unlink(s);
        pa_sink_unref(s);
        return NULL;
    }

    s->monitor_source->monitor_of = s;

    pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
    pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
    pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);

    return s;
}
357
/* Called from main context */
/* Transition the sink to 'state'. Consults the driver's set_state()
 * callback and the IO thread (via asyncmsgq); if the IO thread rejects
 * the change, the driver callback is rolled back. Fires state-change
 * hooks/events and notifies inputs on suspend/resume transitions.
 * Returns 0 on success, negative on failure. */
static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
    int ret;
    pa_bool_t suspend_change;
    pa_sink_state_t original_state;

    pa_assert(s);
    pa_assert_ctl_context();

    if (s->state == state)
        return 0;

    original_state = s->state;

    /* Does this transition cross the SUSPENDED/OPENED boundary? */
    suspend_change =
        (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
        (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);

    if (s->set_state)
        if ((ret = s->set_state(s, state)) < 0)
            return ret;

    if (s->asyncmsgq)
        if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {

            /* The IO thread refused; undo the driver-side change. */
            if (s->set_state)
                s->set_state(s, original_state);

            return ret;
        }

    s->state = state;

    if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the appropriate events */
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
    }

    if (suspend_change) {
        pa_sink_input *i;
        uint32_t idx;

        /* We're suspending or resuming, tell everyone about it */

        PA_IDXSET_FOREACH(i, s->inputs, idx)
            if (s->state == PA_SINK_SUSPENDED &&
                (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
                pa_sink_input_kill(i);
            else if (i->suspend)
                i->suspend(i, state == PA_SINK_SUSPENDED);

        if (s->monitor_source)
            pa_source_sync_suspend(s->monitor_source);
    }

    return 0;
}
415
/* Called from main context */
/* Complete sink initialization started by pa_sink_new(): fix up
 * volume-related flags, validate invariants against the monitor
 * source, move the sink from INIT to IDLE, and announce it. */
void pa_sink_put(pa_sink* s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();

    pa_assert(s->state == PA_SINK_INIT);

    /* The following fields must be initialized properly when calling _put() */
    pa_assert(s->asyncmsgq);
    pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);

    /* Generally, flags should be initialized via pa_sink_new(). As a
     * special exception we allow volume related flags to be set
     * between _new() and _put(). */

    /* Sinks without hardware volume control get software dB volume. */
    if (!(s->flags & PA_SINK_HW_VOLUME_CTRL))
        s->flags |= PA_SINK_DECIBEL_VOLUME;

    if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
        s->flags |= PA_SINK_FLAT_VOLUME;

    /* Re-sync the IO-thread shadow copies in case volume/mute changed
     * between _new() and _put(). */
    s->thread_info.soft_volume = s->soft_volume;
    s->thread_info.soft_muted = s->muted;

    /* Validate flag/latency invariants, including agreement with the
     * monitor source created in pa_sink_new(). */
    pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL) || (s->base_volume == PA_VOLUME_NORM && s->flags & PA_SINK_DECIBEL_VOLUME));
    pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
    pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
    pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));

    pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
    pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
    pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);

    pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);

    pa_source_put(s->monitor_source);

    pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
    pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
}
457
/* Called from main context */
/* Detach the sink from the core: unregister its name, remove it from
 * the core/card sets, kill all attached inputs, transition to
 * UNLINKED, clear callbacks, and unlink the monitor source. Safe to
 * call multiple times. */
void pa_sink_unlink(pa_sink* s) {
    pa_bool_t linked;
    pa_sink_input *i, *j = NULL;

    pa_assert(s);
    pa_assert_ctl_context();

    /* Please note that pa_sink_unlink() does more than simply
     * reversing pa_sink_put(). It also undoes the registrations
     * already done in pa_sink_new()! */

    /* All operations here shall be idempotent, i.e. pa_sink_unlink()
     * may be called multiple times on the same sink without bad
     * effects. */

    linked = PA_SINK_IS_LINKED(s->state);

    if (linked)
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);

    if (s->state != PA_SINK_UNLINKED)
        pa_namereg_unregister(s->core, s->name);
    pa_idxset_remove_by_data(s->core->sinks, s, NULL);

    if (s->card)
        pa_idxset_remove_by_data(s->card->sinks, s, NULL);

    /* Kill every attached input; the 'j' check guards against an input
     * that survives its own kill (which would loop forever). */
    while ((i = pa_idxset_first(s->inputs, NULL))) {
        pa_assert(i != j);
        pa_sink_input_kill(i);
        j = i;
    }

    if (linked)
        sink_set_state(s, PA_SINK_UNLINKED);
    else
        s->state = PA_SINK_UNLINKED;

    reset_callbacks(s);

    if (s->monitor_source)
        pa_source_unlink(s->monitor_source);

    if (linked) {
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
        pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
    }
}
507
/* Called from main context */
/* Destructor invoked when the last reference is dropped. Unlinks the
 * sink if still linked, then releases the monitor source, input sets,
 * silence block, strings, property list and ports. */
static void sink_free(pa_object *o) {
    pa_sink *s = PA_SINK(o);
    pa_sink_input *i;

    pa_assert(s);
    pa_assert_ctl_context();
    pa_assert(pa_sink_refcnt(s) == 0);

    if (PA_SINK_IS_LINKED(s->state))
        pa_sink_unlink(s);

    pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);

    if (s->monitor_source) {
        pa_source_unref(s->monitor_source);
        s->monitor_source = NULL;
    }

    pa_idxset_free(s->inputs, NULL, NULL);

    /* The IO-thread hashmap holds its own references to the inputs;
     * drop them before freeing the map. */
    while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
        pa_sink_input_unref(i);

    pa_hashmap_free(s->thread_info.inputs, NULL, NULL);

    if (s->silence.memblock)
        pa_memblock_unref(s->silence.memblock);

    pa_xfree(s->name);
    pa_xfree(s->driver);

    if (s->proplist)
        pa_proplist_free(s->proplist);

    if (s->ports) {
        pa_device_port *p;

        while ((p = pa_hashmap_steal_first(s->ports)))
            pa_device_port_free(p);

        pa_hashmap_free(s->ports, NULL, NULL);
    }

    pa_xfree(s);
}
554
555 /* Called from main context, and not while the IO thread is active, please */
556 void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
557 pa_sink_assert_ref(s);
558 pa_assert_ctl_context();
559
560 s->asyncmsgq = q;
561
562 if (s->monitor_source)
563 pa_source_set_asyncmsgq(s->monitor_source, q);
564 }
565
566 /* Called from main context, and not while the IO thread is active, please */
567 void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
568 pa_sink_assert_ref(s);
569 pa_assert_ctl_context();
570
571 if (mask == 0)
572 return;
573
574 /* For now, allow only a minimal set of flags to be changed. */
575 pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
576
577 s->flags = (s->flags & ~mask) | (value & mask);
578
579 pa_source_update_flags(s->monitor_source,
580 ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
581 ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
582 ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
583 ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
584 }
585
586 /* Called from IO context, or before _put() from main context */
587 void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
588 pa_sink_assert_ref(s);
589 pa_sink_assert_io_context(s);
590
591 s->thread_info.rtpoll = p;
592
593 if (s->monitor_source)
594 pa_source_set_rtpoll(s->monitor_source, p);
595 }
596
597 /* Called from main context */
598 int pa_sink_update_status(pa_sink*s) {
599 pa_sink_assert_ref(s);
600 pa_assert_ctl_context();
601 pa_assert(PA_SINK_IS_LINKED(s->state));
602
603 if (s->state == PA_SINK_SUSPENDED)
604 return 0;
605
606 return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
607 }
608
/* Called from main context */
/* Add or remove 'cause' from the sink's (and its monitor source's)
 * suspend-cause bitmask, then enter or leave SUSPENDED accordingly.
 * The sink stays suspended while any cause bit remains set. Returns
 * the result of the state change, or 0 if no change was needed. */
int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(cause != 0);

    if (suspend) {
        s->suspend_cause |= cause;
        s->monitor_source->suspend_cause |= cause;
    } else {
        s->suspend_cause &= ~cause;
        s->monitor_source->suspend_cause &= ~cause;
    }

    /* Already in the state the cause mask implies? Then nothing to do. */
    if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
        return 0;

    pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");

    if (s->suspend_cause)
        return sink_set_state(s, PA_SINK_SUSPENDED);
    else
        return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
}
634
/* Called from main context */
/* Begin moving all inputs away from this sink. Each input that
 * successfully starts its move is referenced and pushed onto the
 * returned queue (a new queue is created if 'q' is NULL); the queue is
 * later consumed by pa_sink_move_all_finish() or _fail(). */
pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
    pa_sink_input *i, *n;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (!q)
        q = pa_queue_new();

    /* Fetch the successor before acting on 'i', since starting a move
     * detaches the input from s->inputs while we iterate. */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
        n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));

        pa_sink_input_ref(i);

        if (pa_sink_input_start_move(i) >= 0)
            pa_queue_push(q, i);
        else
            pa_sink_input_unref(i);
    }

    return q;
}
660
661 /* Called from main context */
662 void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
663 pa_sink_input *i;
664
665 pa_sink_assert_ref(s);
666 pa_assert_ctl_context();
667 pa_assert(PA_SINK_IS_LINKED(s->state));
668 pa_assert(q);
669
670 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
671 if (pa_sink_input_finish_move(i, s, save) < 0)
672 pa_sink_input_fail_move(i);
673
674 pa_sink_input_unref(i);
675 }
676
677 pa_queue_free(q, NULL, NULL);
678 }
679
680 /* Called from main context */
681 void pa_sink_move_all_fail(pa_queue *q) {
682 pa_sink_input *i;
683
684 pa_assert_ctl_context();
685 pa_assert(q);
686
687 while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
688 pa_sink_input_fail_move(i);
689 pa_sink_input_unref(i);
690 }
691
692 pa_queue_free(q, NULL, NULL);
693 }
694
/* Called from IO thread context */
/* Execute a rewind of 'nbytes' bytes: clear the pending rewind
 * request, forward the rewind to every attached input, and then to
 * the monitor source. A zero-byte call only clears a pending request. */
void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* If nobody requested this and this is actually no real rewind
     * then we can short cut this. Please note that this means that
     * not all rewind requests triggered upstream will always be
     * translated in actual requests! */
    if (!s->thread_info.rewind_requested && nbytes <= 0)
        return;

    s->thread_info.rewind_nbytes = 0;
    s->thread_info.rewind_requested = FALSE;

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return;

    if (nbytes > 0)
        pa_log_debug("Processing rewind...");

    /* Even a zero-byte rewind is forwarded, so inputs can clear their
     * own pending rewind state. */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
        pa_sink_input_assert_ref(i);
        pa_sink_input_process_rewind(i, nbytes);
    }

    if (nbytes > 0)
        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
            pa_source_process_rewind(s->monitor_source, nbytes);
}
729
/* Called from IO thread context */
/* Peek up to 'maxinfo' chunks of audio, one per input, into 'info'.
 * On return *length is clamped to the shortest peeked chunk (so all
 * entries can be mixed over the same span). Fully silent chunks are
 * dropped and not counted. Each returned entry holds a reference to
 * its input (in ->userdata) and its memblock; both are released later
 * by inputs_drop(). Returns the number of entries filled in. */
static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
    pa_sink_input *i;
    unsigned n = 0;
    void *state = NULL;
    size_t mixlength = *length;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(info);

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
        pa_sink_input_assert_ref(i);

        pa_sink_input_peek(i, *length, &info->chunk, &info->volume);

        if (mixlength == 0 || info->chunk.length < mixlength)
            mixlength = info->chunk.length;

        /* Silence contributes nothing to the mix; drop it right away
         * (note it still narrowed mixlength above). */
        if (pa_memblock_is_silence(info->chunk.memblock)) {
            pa_memblock_unref(info->chunk.memblock);
            continue;
        }

        /* Keep the input alive until inputs_drop() releases it. */
        info->userdata = pa_sink_input_ref(i);

        pa_assert(info->chunk.memblock);
        pa_assert(info->chunk.length > 0);

        info++;
        n++;
        maxinfo--;
    }

    if (mixlength > 0)
        *length = mixlength;

    return n;
}
769
/* Called from IO thread context */
/* Consume 'result->length' bytes from every attached input after a
 * render pass, feed direct outputs and the monitor source, and release
 * the references taken by fill_mix_info() on the 'n' entries in
 * 'info'. */
static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
    pa_sink_input *i;
    void *state = NULL;
    unsigned p = 0;
    unsigned n_unreffed = 0;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(result);
    pa_assert(result->memblock);
    pa_assert(result->length > 0);

    /* We optimize for the case where the order of the inputs has not changed */

    while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL))) {
        unsigned j;
        pa_mix_info* m = NULL;

        pa_sink_input_assert_ref(i);

        /* Let's try to find the matching entry in the pa_mix_info
         * array. 'p' is kept across iterations so that, if the input
         * order is unchanged, each lookup hits on the first probe. */
        for (j = 0; j < n; j ++) {

            if (info[p].userdata == i) {
                m = info + p;
                break;
            }

            p++;
            if (p >= n)
                p = 0;
        }

        /* Drop read data */
        pa_sink_input_drop(i, result->length);

        if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {

            if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
                void *ostate = NULL;
                pa_source_output *o;
                pa_memchunk c;

                /* Post this input's own (volume-adjusted) data to its
                 * direct outputs; if the input contributed nothing
                 * (silent/not peeked), post silence instead. */
                if (m && m->chunk.memblock) {
                    c = m->chunk;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;

                    pa_memchunk_make_writable(&c, 0);
                    pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
                } else {
                    c = s->silence;
                    pa_memblock_ref(c.memblock);
                    pa_assert(result->length <= c.length);
                    c.length = result->length;
                }

                while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
                    pa_source_output_assert_ref(o);
                    pa_assert(o->direct_on_input == i);
                    pa_source_post_direct(s->monitor_source, o, &c);
                }

                pa_memblock_unref(c.memblock);
            }
        }

        /* Release the references fill_mix_info() took for this entry. */
        if (m) {
            if (m->chunk.memblock)
                pa_memblock_unref(m->chunk.memblock);
            pa_memchunk_reset(&m->chunk);

            pa_sink_input_unref(m->userdata);
            m->userdata = NULL;

            n_unreffed += 1;
        }
    }

    /* Now drop references to entries that are included in the
     * pa_mix_info array but don't exist anymore */

    if (n_unreffed < n) {
        for (; n > 0; info++, n--) {
            if (info->userdata)
                pa_sink_input_unref(info->userdata);
            if (info->chunk.memblock)
                pa_memblock_unref(info->chunk.memblock);
        }
    }

    if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
        pa_source_post(s->monitor_source, result);
}
866
867 /* Called from IO thread context */
868 void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
869 pa_mix_info info[MAX_MIX_CHANNELS];
870 unsigned n;
871 size_t block_size_max;
872
873 pa_sink_assert_ref(s);
874 pa_sink_assert_io_context(s);
875 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
876 pa_assert(pa_frame_aligned(length, &s->sample_spec));
877 pa_assert(result);
878
879 pa_sink_ref(s);
880
881 pa_assert(!s->thread_info.rewind_requested);
882 pa_assert(s->thread_info.rewind_nbytes == 0);
883
884 if (s->thread_info.state == PA_SINK_SUSPENDED) {
885 result->memblock = pa_memblock_ref(s->silence.memblock);
886 result->index = s->silence.index;
887 result->length = PA_MIN(s->silence.length, length);
888 return;
889 }
890
891 if (length <= 0)
892 length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
893
894 block_size_max = pa_mempool_block_size_max(s->core->mempool);
895 if (length > block_size_max)
896 length = pa_frame_align(block_size_max, &s->sample_spec);
897
898 pa_assert(length > 0);
899
900 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
901
902 if (n == 0) {
903
904 *result = s->silence;
905 pa_memblock_ref(result->memblock);
906
907 if (result->length > length)
908 result->length = length;
909
910 } else if (n == 1) {
911 pa_cvolume volume;
912
913 *result = info[0].chunk;
914 pa_memblock_ref(result->memblock);
915
916 if (result->length > length)
917 result->length = length;
918
919 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
920
921 if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
922 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
923 pa_memblock_unref(result->memblock);
924 pa_silence_memchunk_get(&s->core->silence_cache,
925 s->core->mempool,
926 result,
927 &s->sample_spec,
928 result->length);
929 } else {
930 pa_memchunk_make_writable(result, 0);
931 pa_volume_memchunk(result, &s->sample_spec, &volume);
932 }
933 }
934 } else {
935 void *ptr;
936 result->memblock = pa_memblock_new(s->core->mempool, length);
937
938 ptr = pa_memblock_acquire(result->memblock);
939 result->length = pa_mix(info, n,
940 ptr, length,
941 &s->sample_spec,
942 &s->thread_info.soft_volume,
943 s->thread_info.soft_muted);
944 pa_memblock_release(result->memblock);
945
946 result->index = 0;
947 }
948
949 inputs_drop(s, info, n, result);
950
951 pa_sink_unref(s);
952 }
953
954 /* Called from IO thread context */
955 void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
956 pa_mix_info info[MAX_MIX_CHANNELS];
957 unsigned n;
958 size_t length, block_size_max;
959
960 pa_sink_assert_ref(s);
961 pa_sink_assert_io_context(s);
962 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
963 pa_assert(target);
964 pa_assert(target->memblock);
965 pa_assert(target->length > 0);
966 pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
967
968 pa_sink_ref(s);
969
970 pa_assert(!s->thread_info.rewind_requested);
971 pa_assert(s->thread_info.rewind_nbytes == 0);
972
973 if (s->thread_info.state == PA_SINK_SUSPENDED) {
974 pa_silence_memchunk(target, &s->sample_spec);
975 return;
976 }
977
978 length = target->length;
979 block_size_max = pa_mempool_block_size_max(s->core->mempool);
980 if (length > block_size_max)
981 length = pa_frame_align(block_size_max, &s->sample_spec);
982
983 pa_assert(length > 0);
984
985 n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
986
987 if (n == 0) {
988 if (target->length > length)
989 target->length = length;
990
991 pa_silence_memchunk(target, &s->sample_spec);
992 } else if (n == 1) {
993 pa_cvolume volume;
994
995 if (target->length > length)
996 target->length = length;
997
998 pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
999
1000 if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
1001 pa_silence_memchunk(target, &s->sample_spec);
1002 else {
1003 pa_memchunk vchunk;
1004
1005 vchunk = info[0].chunk;
1006 pa_memblock_ref(vchunk.memblock);
1007
1008 if (vchunk.length > length)
1009 vchunk.length = length;
1010
1011 if (!pa_cvolume_is_norm(&volume)) {
1012 pa_memchunk_make_writable(&vchunk, 0);
1013 pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
1014 }
1015
1016 pa_memchunk_memcpy(target, &vchunk);
1017 pa_memblock_unref(vchunk.memblock);
1018 }
1019
1020 } else {
1021 void *ptr;
1022
1023 ptr = pa_memblock_acquire(target->memblock);
1024
1025 target->length = pa_mix(info, n,
1026 (uint8_t*) ptr + target->index, length,
1027 &s->sample_spec,
1028 &s->thread_info.soft_volume,
1029 s->thread_info.soft_muted);
1030
1031 pa_memblock_release(target->memblock);
1032 }
1033
1034 inputs_drop(s, info, n, target);
1035
1036 pa_sink_unref(s);
1037 }
1038
/* Called from IO thread context */
/* Fill 'target' completely, calling pa_sink_render_into() repeatedly
 * until the whole requested length has been produced (each call may
 * deliver less than asked for). */
void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
    pa_memchunk chunk;
    size_t l, d;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(target);
    pa_assert(target->memblock);
    pa_assert(target->length > 0);
    pa_assert(pa_frame_aligned(target->length, &s->sample_spec));

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    /* l = bytes still to render, d = offset already filled. */
    l = target->length;
    d = 0;
    while (l > 0) {
        chunk = *target;
        chunk.index += d;
        chunk.length -= d;

        pa_sink_render_into(s, &chunk);

        d += chunk.length;
        l -= chunk.length;
    }

    pa_sink_unref(s);
}
1072
/* Called from IO thread context */
/* Render exactly 'length' bytes into *result. A first mixing pass may
 * produce less than requested; the remainder is then filled in by
 * repeated pa_sink_render_into() calls on a writable copy. */
void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
    pa_mix_info info[MAX_MIX_CHANNELS];
    size_t length1st = length;
    unsigned n;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
    pa_assert(length > 0);
    pa_assert(pa_frame_aligned(length, &s->sample_spec));
    pa_assert(result);

    pa_sink_ref(s);

    pa_assert(!s->thread_info.rewind_requested);
    pa_assert(s->thread_info.rewind_nbytes == 0);

    pa_assert(length > 0);

    /* First pass: length1st is clamped to what the inputs can supply. */
    n = fill_mix_info(s, &length1st, info, MAX_MIX_CHANNELS);

    if (n == 0) {
        pa_silence_memchunk_get(&s->core->silence_cache,
                                s->core->mempool,
                                result,
                                &s->sample_spec,
                                length1st);
    } else if (n == 1) {
        pa_cvolume volume;

        /* Single input: reuse its chunk and apply volume in place
         * (or substitute silence when effectively muted). */
        *result = info[0].chunk;
        pa_memblock_ref(result->memblock);

        if (result->length > length)
            result->length = length;

        pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);

        if (s->thread_info.soft_muted || !pa_cvolume_is_norm(&volume)) {
            if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
                pa_memblock_unref(result->memblock);
                pa_silence_memchunk_get(&s->core->silence_cache,
                                        s->core->mempool,
                                        result,
                                        &s->sample_spec,
                                        result->length);
            } else {
                pa_memchunk_make_writable(result, length);
                pa_volume_memchunk(result, &s->sample_spec, &volume);
            }
        }
    } else {
        void *ptr;

        /* The block is sized for the full 'length' so the second pass
         * below can complete it in place. */
        result->index = 0;
        result->memblock = pa_memblock_new(s->core->mempool, length);

        ptr = pa_memblock_acquire(result->memblock);

        result->length = pa_mix(info, n,
                                (uint8_t*) ptr + result->index, length1st,
                                &s->sample_spec,
                                &s->thread_info.soft_volume,
                                s->thread_info.soft_muted);

        pa_memblock_release(result->memblock);
    }

    inputs_drop(s, info, n, result);

    /* Second pass: top up whatever the first pass left unfilled. */
    if (result->length < length) {
        pa_memchunk chunk;
        size_t l, d;
        pa_memchunk_make_writable(result, length);

        l = length - result->length;
        d = result->index + result->length;
        while (l > 0) {
            chunk = *result;
            chunk.index = d;
            chunk.length = l;

            pa_sink_render_into(s, &chunk);

            d += chunk.length;
            l -= chunk.length;
        }
        result->length = length;
    }

    pa_sink_unref(s);
}
1166
1167 /* Called from main thread */
1168 pa_usec_t pa_sink_get_latency(pa_sink *s) {
1169 pa_usec_t usec = 0;
1170
1171 pa_sink_assert_ref(s);
1172 pa_assert_ctl_context();
1173 pa_assert(PA_SINK_IS_LINKED(s->state));
1174
1175 /* The returned value is supposed to be in the time domain of the sound card! */
1176
1177 if (s->state == PA_SINK_SUSPENDED)
1178 return 0;
1179
1180 if (!(s->flags & PA_SINK_LATENCY))
1181 return 0;
1182
1183 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
1184
1185 return usec;
1186 }
1187
/* Called from IO thread. Same as pa_sink_get_latency() but runs the
 * GET_LATENCY message handler directly instead of going through the
 * async message queue. */
pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
    pa_usec_t usec = 0;
    pa_msgobject *o;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* The returned value is supposed to be in the time domain of the sound card! */

    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return 0;

    if (!(s->flags & PA_SINK_LATENCY))
        return 0;

    o = PA_MSGOBJECT(s);

    /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */

    /* NOTE(review): pa_usec_t is unsigned, so this -1 wraps to
     * (pa_usec_t) -1 -- callers must treat that value as the error
     * sentinel; verify all callers do. */
    if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
        return -1;

    return usec;
}
1214
/* Called from main thread. Derives the sink input's soft volume from
 * its virtual volume relative to the given new sink volume. */
static void compute_new_soft_volume(pa_sink_input *i, const pa_cvolume *new_volume) {
    unsigned c;

    pa_sink_input_assert_ref(i);
    pa_assert(new_volume->channels == i->sample_spec.channels);

    /*
     * This basically calculates:
     *
     * i->relative_volume := i->virtual_volume / new_volume
     * i->soft_volume := i->relative_volume * i->volume_factor
     */

    /* The new sink volume passed in here must already be remapped to
     * the sink input's channel map! */

    i->soft_volume.channels = i->sample_spec.channels;

    for (c = 0; c < i->sample_spec.channels; c++)

        if (new_volume->values[c] <= PA_VOLUME_MUTED)
            /* We leave i->relative_volume untouched */
            i->soft_volume.values[c] = PA_VOLUME_MUTED;
        else {
            /* Do the division in the linear domain */
            i->relative_volume[c] =
                pa_sw_volume_to_linear(i->virtual_volume.values[c]) /
                pa_sw_volume_to_linear(new_volume->values[c]);

            i->soft_volume.values[c] = pa_sw_volume_from_linear(
                    i->relative_volume[c] *
                    pa_sw_volume_to_linear(i->volume_factor.values[c]));
        }

    /* Hooks have the ability to play games with i->soft_volume */
    pa_hook_fire(&i->core->hooks[PA_CORE_HOOK_SINK_INPUT_SET_VOLUME], i);

    /* We don't copy the soft_volume to the thread_info data
     * here. That must be done by the caller */
}
1254
/* Called from main thread. Computes into *new_volume the sink volume
 * that flat-volume mode requires (the per-channel maximum over all
 * input virtual volumes) and updates each input's soft volume to
 * match. Does NOT apply the sink volume itself -- the caller is
 * expected to pass *new_volume to pa_sink_set_volume(). */
void pa_sink_update_flat_volume(pa_sink *s, pa_cvolume *new_volume) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(new_volume);
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    /* This is called whenever a sink input volume changes or a sink
     * input is added/removed and we might need to fix up the sink
     * volume accordingly. Please note that we don't actually update
     * the sinks volume here, we only return how it needs to be
     * updated. The caller should then call pa_sink_set_volume().*/

    if (pa_idxset_isempty(s->inputs)) {
        /* In the special case that we have no sink input we leave the
         * volume unmodified. */
        *new_volume = s->reference_volume;
        return;
    }

    pa_cvolume_mute(new_volume, s->channel_map.channels);

    /* First let's determine the new maximum volume of all inputs
     * connected to this sink */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
        unsigned c;
        pa_cvolume remapped_volume;

        /* Compare in the sink's channel map, so remap the input's
         * virtual volume first */
        remapped_volume = i->virtual_volume;
        pa_cvolume_remap(&remapped_volume, &i->channel_map, &s->channel_map);

        for (c = 0; c < new_volume->channels; c++)
            if (remapped_volume.values[c] > new_volume->values[c])
                new_volume->values[c] = remapped_volume.values[c];
    }

    /* Then, let's update the soft volumes of all inputs connected
     * to this sink */
    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
        pa_cvolume remapped_new_volume;

        remapped_new_volume = *new_volume;
        pa_cvolume_remap(&remapped_new_volume, &s->channel_map, &i->channel_map);
        compute_new_soft_volume(i, &remapped_new_volume);

        /* We don't copy soft_volume to the thread_info data here
         * (i.e. issue PA_SINK_INPUT_MESSAGE_SET_VOLUME) because we
         * want the update to be atomically with the sink volume
         * update, hence we do it within the pa_sink_set_volume() call
         * below */
    }
}
1311
/* Called from main thread. Pushes a sink volume change down to all
 * connected sink inputs in flat-volume mode: each input's virtual
 * volume becomes relative_volume * sink virtual volume. */
void pa_sink_propagate_flat_volume(pa_sink *s) {
    pa_sink_input *i;
    uint32_t idx;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(s->flags & PA_SINK_FLAT_VOLUME);

    /* This is called whenever the sink volume changes that is not
     * caused by a sink input volume change. We need to fix up the
     * sink input volumes accordingly */

    for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx))) {
        pa_cvolume sink_volume, new_virtual_volume;
        unsigned c;

        /* This basically calculates i->virtual_volume := i->relative_volume * s->virtual_volume */

        /* Work in the input's channel map */
        sink_volume = s->virtual_volume;
        pa_cvolume_remap(&sink_volume, &s->channel_map, &i->channel_map);

        for (c = 0; c < i->sample_spec.channels; c++)
            new_virtual_volume.values[c] = pa_sw_volume_from_linear(
                    i->relative_volume[c] *
                    pa_sw_volume_to_linear(sink_volume.values[c]));

        new_virtual_volume.channels = i->sample_spec.channels;

        if (!pa_cvolume_equal(&new_virtual_volume, &i->virtual_volume)) {
            i->virtual_volume = new_virtual_volume;

            /* Hmm, the soft volume might no longer actually match
             * what has been chosen as new virtual volume here,
             * especially when the old volume was
             * PA_VOLUME_MUTED. Hence let's recalculate the soft
             * volumes here. */
            compute_new_soft_volume(i, &sink_volume);

            /* The virtual volume changed, let's tell people so */
            pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
        }
    }

    /* If the soft_volume of any of the sink inputs got changed, let's
     * make sure the thread copies are synced up. */
    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SYNC_VOLUMES, NULL, 0, NULL) == 0);
}
1361
/* Called from main thread.
 *
 * Applies a new virtual volume to the sink:
 *   propagate        - in flat volume mode, push the change down to the inputs
 *   sendmsg          - tell the IO thread that soft/virtual volume changed
 *   become_reference - also take the new volume as the reference volume
 *   save             - mark the volume as worth saving across restarts */
void pa_sink_set_volume(pa_sink *s, const pa_cvolume *volume, pa_bool_t propagate, pa_bool_t sendmsg, pa_bool_t become_reference, pa_bool_t save) {
    pa_bool_t virtual_volume_changed;

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));
    pa_assert(volume);
    pa_assert(pa_cvolume_valid(volume));
    pa_assert(pa_cvolume_compatible(volume, &s->sample_spec));

    virtual_volume_changed = !pa_cvolume_equal(volume, &s->virtual_volume);
    s->virtual_volume = *volume;
    /* Keep the save flag only if the volume is unchanged, otherwise
     * follow the caller's wish */
    s->save_volume = (!virtual_volume_changed && s->save_volume) || save;

    if (become_reference)
        s->reference_volume = s->virtual_volume;

    /* Propagate this volume change back to the inputs */
    if (virtual_volume_changed)
        if (propagate && (s->flags & PA_SINK_FLAT_VOLUME))
            pa_sink_propagate_flat_volume(s);

    if (s->set_volume) {
        /* If we have a function set_volume(), then we do not apply a
         * soft volume by default. However, set_volume() is free to
         * apply one to s->soft_volume */

        pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
        s->set_volume(s);

    } else
        /* If we have no function set_volume(), then the soft volume
         * becomes the virtual volume */
        s->soft_volume = s->virtual_volume;

    /* This tells the sink that soft and/or virtual volume changed */
    if (sendmsg)
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);

    if (virtual_volume_changed)
        pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
1405
1406 /* Called from main thread. Only to be called by sink implementor */
1407 void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
1408 pa_sink_assert_ref(s);
1409 pa_assert_ctl_context();
1410 pa_assert(volume);
1411
1412 s->soft_volume = *volume;
1413
1414 if (PA_SINK_IS_LINKED(s->state))
1415 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
1416 else
1417 s->thread_info.soft_volume = *volume;
1418 }
1419
/* Called from main thread. Returns the sink's current volume,
 * optionally re-reading it from the hardware first. With 'reference'
 * set the reference volume is returned instead of the virtual one. */
const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh, pa_bool_t reference) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_volume || force_refresh) {
        struct pa_cvolume old_virtual_volume = s->virtual_volume;

        /* Let the implementor read back the hardware volume ... */
        if (s->get_volume)
            s->get_volume(s);

        /* ... and give the IO thread a chance to update it, too */
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);

        if (!pa_cvolume_equal(&old_virtual_volume, &s->virtual_volume)) {

            s->reference_volume = s->virtual_volume;

            /* Something got changed in the hardware. It probably
             * makes sense to save changed hw settings given that hw
             * volume changes not triggered by PA are almost certainly
             * done by the user. */
            s->save_volume = TRUE;

            if (s->flags & PA_SINK_FLAT_VOLUME)
                pa_sink_propagate_flat_volume(s);

            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
        }
    }

    return reference ? &s->reference_volume : &s->virtual_volume;
}
1453
1454 /* Called from main thread */
1455 void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_volume) {
1456 pa_sink_assert_ref(s);
1457 pa_assert_ctl_context();
1458 pa_assert(PA_SINK_IS_LINKED(s->state));
1459
1460 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1461 if (pa_cvolume_equal(&s->virtual_volume, new_volume))
1462 return;
1463
1464 s->reference_volume = s->virtual_volume = *new_volume;
1465 s->save_volume = TRUE;
1466
1467 if (s->flags & PA_SINK_FLAT_VOLUME)
1468 pa_sink_propagate_flat_volume(s);
1469
1470 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1471 }
1472
1473 /* Called from main thread */
1474 void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
1475 pa_bool_t old_muted;
1476
1477 pa_sink_assert_ref(s);
1478 pa_assert_ctl_context();
1479 pa_assert(PA_SINK_IS_LINKED(s->state));
1480
1481 old_muted = s->muted;
1482 s->muted = mute;
1483 s->save_muted = (old_muted == s->muted && s->save_muted) || save;
1484
1485 if (s->set_mute)
1486 s->set_mute(s);
1487
1488 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
1489
1490 if (old_muted != s->muted)
1491 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1492 }
1493
/* Called from main thread. Returns the sink's current mute state,
 * optionally re-reading it from the hardware first. */
pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {

    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    if (s->refresh_muted || force_refresh) {
        pa_bool_t old_muted = s->muted;

        /* Let the implementor read back the hardware mute switch ... */
        if (s->get_mute)
            s->get_mute(s);

        /* ... and give the IO thread a chance to update it, too */
        pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);

        if (old_muted != s->muted) {
            /* Changed behind our back, most likely by the user --
             * remember it as worth saving */
            s->save_muted = TRUE;

            pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);

            /* Make sure the soft mute status stays in sync */
            pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
        }
    }


    return s->muted;
}
1522
1523 /* Called from main thread */
1524 void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
1525 pa_sink_assert_ref(s);
1526 pa_assert_ctl_context();
1527 pa_assert(PA_SINK_IS_LINKED(s->state));
1528
1529 /* The sink implementor may call this if the volume changed to make sure everyone is notified */
1530
1531 if (s->muted == new_muted)
1532 return;
1533
1534 s->muted = new_muted;
1535 s->save_muted = TRUE;
1536
1537 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1538 }
1539
1540 /* Called from main thread */
1541 pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
1542 pa_sink_assert_ref(s);
1543 pa_assert_ctl_context();
1544
1545 if (p)
1546 pa_proplist_update(s->proplist, mode, p);
1547
1548 if (PA_SINK_IS_LINKED(s->state)) {
1549 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1550 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1551 }
1552
1553 return TRUE;
1554 }
1555
1556 /* Called from main thread */
1557 /* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
1558 void pa_sink_set_description(pa_sink *s, const char *description) {
1559 const char *old;
1560 pa_sink_assert_ref(s);
1561 pa_assert_ctl_context();
1562
1563 if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
1564 return;
1565
1566 old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1567
1568 if (old && description && pa_streq(old, description))
1569 return;
1570
1571 if (description)
1572 pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
1573 else
1574 pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
1575
1576 if (s->monitor_source) {
1577 char *n;
1578
1579 n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
1580 pa_source_set_description(s->monitor_source, n);
1581 pa_xfree(n);
1582 }
1583
1584 if (PA_SINK_IS_LINKED(s->state)) {
1585 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
1586 pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
1587 }
1588 }
1589
1590 /* Called from main thread */
1591 unsigned pa_sink_linked_by(pa_sink *s) {
1592 unsigned ret;
1593
1594 pa_sink_assert_ref(s);
1595 pa_assert_ctl_context();
1596 pa_assert(PA_SINK_IS_LINKED(s->state));
1597
1598 ret = pa_idxset_size(s->inputs);
1599
1600 /* We add in the number of streams connected to us here. Please
1601 * note the asymmmetry to pa_sink_used_by()! */
1602
1603 if (s->monitor_source)
1604 ret += pa_source_linked_by(s->monitor_source);
1605
1606 return ret;
1607 }
1608
1609 /* Called from main thread */
1610 unsigned pa_sink_used_by(pa_sink *s) {
1611 unsigned ret;
1612
1613 pa_sink_assert_ref(s);
1614 pa_assert_ctl_context();
1615 pa_assert(PA_SINK_IS_LINKED(s->state));
1616
1617 ret = pa_idxset_size(s->inputs);
1618 pa_assert(ret >= s->n_corked);
1619
1620 /* Streams connected to our monitor source do not matter for
1621 * pa_sink_used_by()!.*/
1622
1623 return ret - s->n_corked;
1624 }
1625
1626 /* Called from main thread */
1627 unsigned pa_sink_check_suspend(pa_sink *s) {
1628 unsigned ret;
1629 pa_sink_input *i;
1630 uint32_t idx;
1631
1632 pa_sink_assert_ref(s);
1633 pa_assert_ctl_context();
1634
1635 if (!PA_SINK_IS_LINKED(s->state))
1636 return 0;
1637
1638 ret = 0;
1639
1640 PA_IDXSET_FOREACH(i, s->inputs, idx) {
1641 pa_sink_input_state_t st;
1642
1643 st = pa_sink_input_get_state(i);
1644 pa_assert(PA_SINK_INPUT_IS_LINKED(st));
1645
1646 if (st == PA_SINK_INPUT_CORKED)
1647 continue;
1648
1649 if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
1650 continue;
1651
1652 ret ++;
1653 }
1654
1655 if (s->monitor_source)
1656 ret += pa_source_check_suspend(s->monitor_source);
1657
1658 return ret;
1659 }
1660
1661 /* Called from the IO thread */
1662 static void sync_input_volumes_within_thread(pa_sink *s) {
1663 pa_sink_input *i;
1664 void *state = NULL;
1665
1666 pa_sink_assert_ref(s);
1667 pa_sink_assert_io_context(s);
1668
1669 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
1670 if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
1671 continue;
1672
1673 i->thread_info.soft_volume = i->soft_volume;
1674 pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
1675 }
1676 }
1677
/* Called from IO thread, except when it is not.
 *
 * Default message handler for sinks: handles stream attach/detach,
 * stream moves, volume/mute synchronization, state changes and the
 * various latency/rewind parameter queries. Sink implementations
 * typically chain up to this from their own process_msg(). Returns 0
 * on success, negative on unknown message codes. */
int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
    pa_sink *s = PA_SINK(o);
    pa_sink_assert_ref(s);

    switch ((pa_sink_message_t) code) {

        case PA_SINK_MESSAGE_ADD_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_FINISH_MOVE, too. */

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            /* Since the caller sleeps in pa_sink_input_put(), we can
             * safely access data outside of thread_info even though
             * it is mutable */

            /* Link into the thread-side synchronized-stream list */
            if ((i->thread_info.sync_prev = i->sync_prev)) {
                pa_assert(i->sink == i->thread_info.sync_prev->sink);
                pa_assert(i->sync_prev->sync_next == i);
                i->thread_info.sync_prev->thread_info.sync_next = i;
            }

            if ((i->thread_info.sync_next = i->sync_next)) {
                pa_assert(i->sink == i->thread_info.sync_next->sink);
                pa_assert(i->sync_next->sync_prev == i);
                i->thread_info.sync_next->thread_info.sync_prev = i;
            }

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            /* The requested latency of the sink input needs to be
             * fixed up and then configured on the sink */

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            /* We don't rewind here automatically. This is left to the
             * sink input implementor because some sink inputs need a
             * slow start, i.e. need some time to buffer client
             * samples before beginning streaming. */

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_REMOVE_INPUT: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* If you change anything here, make sure to change the
             * sink input handling a few lines down at
             * PA_SINK_MESSAGE_START_MOVE, too. */

            if (i->detach)
                i->detach(i);

            pa_sink_input_set_state_within_thread(i, i->state);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Since the caller sleeps in pa_sink_input_unlink(),
             * we can safely access data outside of thread_info even
             * though it is mutable */

            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);

            /* Unlink from the thread-side synchronized-stream list */
            if (i->thread_info.sync_prev) {
                i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
                i->thread_info.sync_prev = NULL;
            }

            if (i->thread_info.sync_next) {
                i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
                i->thread_info.sync_next = NULL;
            }

            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_START_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t sink_nbytes, total_nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
                total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);

                if (total_nbytes > 0) {
                    /* Rewrite everything the stream still has in
                     * flight so it can be replayed on the new sink */
                    i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
                    i->thread_info.rewrite_flush = TRUE;
                    pa_sink_input_process_rewind(i, sink_nbytes);
                }
            }

            if (i->detach)
                i->detach(i);

            pa_assert(i->thread_info.attached);
            i->thread_info.attached = FALSE;

            /* Let's remove the sink input ...*/
            if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
                pa_sink_input_unref(i);

            pa_sink_invalidate_requested_latency(s, TRUE);

            pa_log_debug("Requesting rewind due to started move");
            pa_sink_request_rewind(s, (size_t) -1);

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_FINISH_MOVE: {
            pa_sink_input *i = PA_SINK_INPUT(userdata);

            /* We don't support moving synchronized streams. */
            pa_assert(!i->sync_prev);
            pa_assert(!i->sync_next);
            pa_assert(!i->thread_info.sync_next);
            pa_assert(!i->thread_info.sync_prev);

            pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));

            pa_assert(!i->thread_info.attached);
            i->thread_info.attached = TRUE;

            if (i->attach)
                i->attach(i);

            if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
                pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);

            pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
            pa_sink_input_update_max_request(i, s->thread_info.max_request);

            if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
                pa_usec_t usec = 0;
                size_t nbytes;

                /* Get the latency of the sink */
                usec = pa_sink_get_latency_within_thread(s);
                nbytes = pa_usec_to_bytes(usec, &s->sample_spec);

                /* Skip ahead by the new sink's latency so playback
                 * lines up again after the move */
                if (nbytes > 0)
                    pa_sink_input_drop(i, nbytes);

                pa_log_debug("Requesting rewind due to finished move");
                pa_sink_request_rewind(s, nbytes);
            }

            /* In flat volume mode we need to update the volume as
             * well */
            return o->process_msg(o, PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL);
        }

        case PA_SINK_MESSAGE_SET_VOLUME:

            if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
                s->thread_info.soft_volume = s->soft_volume;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            if (!(s->flags & PA_SINK_FLAT_VOLUME))
                return 0;

            /* Fall through ... */

        case PA_SINK_MESSAGE_SYNC_VOLUMES:
            sync_input_volumes_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_VOLUME:
            return 0;

        case PA_SINK_MESSAGE_SET_MUTE:

            if (s->thread_info.soft_muted != s->muted) {
                s->thread_info.soft_muted = s->muted;
                pa_sink_request_rewind(s, (size_t) -1);
            }

            return 0;

        case PA_SINK_MESSAGE_GET_MUTE:
            return 0;

        case PA_SINK_MESSAGE_SET_STATE: {

            /* Whether this transition crosses the suspended/opened
             * boundary in either direction */
            pa_bool_t suspend_change =
                (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
                (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);

            s->thread_info.state = PA_PTR_TO_UINT(userdata);

            if (s->thread_info.state == PA_SINK_SUSPENDED) {
                /* A suspended sink has no pending rewind */
                s->thread_info.rewind_nbytes = 0;
                s->thread_info.rewind_requested = FALSE;
            }

            if (suspend_change) {
                pa_sink_input *i;
                void *state = NULL;

                while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
                    if (i->suspend_within_thread)
                        i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
            }

            return 0;
        }

        case PA_SINK_MESSAGE_DETACH:

            /* Detach all streams */
            pa_sink_detach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_ATTACH:

            /* Reattach all streams */
            pa_sink_attach_within_thread(s);
            return 0;

        case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {

            pa_usec_t *usec = userdata;
            *usec = pa_sink_get_requested_latency_within_thread(s);

            /* Yes, that's right, the IO thread will see -1 when no
             * explicit requested latency is configured, the main
             * thread will see max_latency */
            if (*usec == (pa_usec_t) -1)
                *usec = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
            /* r[0] = min latency, r[1] = max latency */
            pa_usec_t *r = userdata;

            pa_sink_set_latency_range_within_thread(s, r[0], r[1]);

            return 0;
        }

        case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
            pa_usec_t *r = userdata;

            r[0] = s->thread_info.min_latency;
            r[1] = s->thread_info.max_latency;

            return 0;
        }

        case PA_SINK_MESSAGE_GET_FIXED_LATENCY:

            *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
            return 0;

        case PA_SINK_MESSAGE_SET_FIXED_LATENCY:

            pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REWIND:

            *((size_t*) userdata) = s->thread_info.max_rewind;
            return 0;

        case PA_SINK_MESSAGE_GET_MAX_REQUEST:

            *((size_t*) userdata) = s->thread_info.max_request;
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REWIND:

            pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_SET_MAX_REQUEST:

            pa_sink_set_max_request_within_thread(s, (size_t) offset);
            return 0;

        case PA_SINK_MESSAGE_GET_LATENCY:
        case PA_SINK_MESSAGE_MAX:
            /* GET_LATENCY must be implemented by the sink itself;
             * fall out and return failure */
            ;
    }

    return -1;
}
2004
2005 /* Called from main thread */
2006 int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
2007 pa_sink *sink;
2008 uint32_t idx;
2009 int ret = 0;
2010
2011 pa_core_assert_ref(c);
2012 pa_assert_ctl_context();
2013 pa_assert(cause != 0);
2014
2015 PA_IDXSET_FOREACH(sink, c->sinks, idx) {
2016 int r;
2017
2018 if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
2019 ret = r;
2020 }
2021
2022 return ret;
2023 }
2024
/* Called from main thread. Asks the IO thread to detach all streams
 * (synchronously, via PA_SINK_MESSAGE_DETACH). */
void pa_sink_detach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
}
2033
/* Called from main thread. Asks the IO thread to reattach all streams
 * (synchronously, via PA_SINK_MESSAGE_ATTACH). */
void pa_sink_attach(pa_sink *s) {
    pa_sink_assert_ref(s);
    pa_assert_ctl_context();
    pa_assert(PA_SINK_IS_LINKED(s->state));

    pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
}
2042
2043 /* Called from IO thread */
2044 void pa_sink_detach_within_thread(pa_sink *s) {
2045 pa_sink_input *i;
2046 void *state = NULL;
2047
2048 pa_sink_assert_ref(s);
2049 pa_sink_assert_io_context(s);
2050 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2051
2052 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2053 if (i->detach)
2054 i->detach(i);
2055
2056 if (s->monitor_source)
2057 pa_source_detach_within_thread(s->monitor_source);
2058 }
2059
2060 /* Called from IO thread */
2061 void pa_sink_attach_within_thread(pa_sink *s) {
2062 pa_sink_input *i;
2063 void *state = NULL;
2064
2065 pa_sink_assert_ref(s);
2066 pa_sink_assert_io_context(s);
2067 pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
2068
2069 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2070 if (i->attach)
2071 i->attach(i);
2072
2073 if (s->monitor_source)
2074 pa_source_attach_within_thread(s->monitor_source);
2075 }
2076
/* Called from IO thread. Records a rewind request of 'nbytes' bytes
 * ((size_t) -1 means "as much as possible") and notifies the sink
 * implementor. Requests never exceed max_rewind; smaller requests are
 * subsumed by an already pending larger one. */
void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);
    pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));

    /* Suspended sinks don't rewind */
    if (s->thread_info.state == PA_SINK_SUSPENDED)
        return;

    if (nbytes == (size_t) -1)
        nbytes = s->thread_info.max_rewind;

    nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);

    /* An at-least-as-large rewind is already pending, nothing to do */
    if (s->thread_info.rewind_requested &&
        nbytes <= s->thread_info.rewind_nbytes)
        return;

    s->thread_info.rewind_nbytes = nbytes;
    s->thread_info.rewind_requested = TRUE;

    if (s->request_rewind)
        s->request_rewind(s);
}
2101
/* Called from IO thread. Returns the smallest latency requested by any
 * connected sink input or the monitor source, clamped to the sink's
 * latency range, or (pa_usec_t) -1 when nobody requested anything.
 * The result is cached until pa_sink_invalidate_requested_latency()
 * is called. */
pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
    pa_usec_t result = (pa_usec_t) -1;
    pa_sink_input *i;
    void *state = NULL;
    pa_usec_t monitor_latency;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Sinks without dynamic latency always report their (clamped)
     * fixed latency */
    if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
        return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);

    if (s->thread_info.requested_latency_valid)
        return s->thread_info.requested_latency;

    /* Take the minimum over all inputs that requested anything */
    PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
        if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
            (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
            result = i->thread_info.requested_sink_latency;

    /* ... the monitor source's streams count, too */
    monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);

    if (monitor_latency != (pa_usec_t) -1 &&
        (result == (pa_usec_t) -1 || result > monitor_latency))
        result = monitor_latency;

    if (result != (pa_usec_t) -1)
        result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        /* Only cache if properly initialized */
        s->thread_info.requested_latency = result;
        s->thread_info.requested_latency_valid = TRUE;
    }

    return result;
}
2140
2141 /* Called from main thread */
2142 pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
2143 pa_usec_t usec = 0;
2144
2145 pa_sink_assert_ref(s);
2146 pa_assert_ctl_context();
2147 pa_assert(PA_SINK_IS_LINKED(s->state));
2148
2149 if (s->state == PA_SINK_SUSPENDED)
2150 return 0;
2151
2152 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
2153 return usec;
2154 }
2155
2156 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2157 void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
2158 pa_sink_input *i;
2159 void *state = NULL;
2160
2161 pa_sink_assert_ref(s);
2162 pa_sink_assert_io_context(s);
2163
2164 if (max_rewind == s->thread_info.max_rewind)
2165 return;
2166
2167 s->thread_info.max_rewind = max_rewind;
2168
2169 if (PA_SINK_IS_LINKED(s->thread_info.state))
2170 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2171 pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
2172
2173 if (s->monitor_source)
2174 pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
2175 }
2176
2177 /* Called from main thread */
2178 void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
2179 pa_sink_assert_ref(s);
2180 pa_assert_ctl_context();
2181
2182 if (PA_SINK_IS_LINKED(s->state))
2183 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
2184 else
2185 pa_sink_set_max_rewind_within_thread(s, max_rewind);
2186 }
2187
2188 /* Called from IO as well as the main thread -- the latter only before the IO thread started up */
2189 void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
2190 void *state = NULL;
2191
2192 pa_sink_assert_ref(s);
2193 pa_sink_assert_io_context(s);
2194
2195 if (max_request == s->thread_info.max_request)
2196 return;
2197
2198 s->thread_info.max_request = max_request;
2199
2200 if (PA_SINK_IS_LINKED(s->thread_info.state)) {
2201 pa_sink_input *i;
2202
2203 PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
2204 pa_sink_input_update_max_request(i, s->thread_info.max_request);
2205 }
2206 }
2207
2208 /* Called from main thread */
2209 void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
2210 pa_sink_assert_ref(s);
2211 pa_assert_ctl_context();
2212
2213 if (PA_SINK_IS_LINKED(s->state))
2214 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
2215 else
2216 pa_sink_set_max_request_within_thread(s, max_request);
2217 }
2218
/* Called from IO thread */
/* Invalidate the cached requested-latency value so the next call to
 * pa_sink_get_requested_latency_within_thread() recomputes it, and
 * notify the implementation plus all connected streams.
 *
 * NOTE(review): 'dynamic' presumably marks that only a dynamic latency
 * request changed; for sinks without PA_SINK_DYNAMIC_LATENCY such a
 * change is irrelevant and we return early -- confirm against callers. */
void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
    pa_sink_input *i;
    void *state = NULL;

    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Only sinks with dynamic latency keep a cached value to drop */
    if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
        s->thread_info.requested_latency_valid = FALSE;
    else if (dynamic)
        return;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {

        /* Let the implementation react first... */
        if (s->update_requested_latency)
            s->update_requested_latency(s);

        /* ...then all connected streams */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_requested_latency)
                i->update_sink_requested_latency(i);
    }
}
2242
2243 /* Called from main thread */
2244 void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
2245 pa_sink_assert_ref(s);
2246 pa_assert_ctl_context();
2247
2248 /* min_latency == 0: no limit
2249 * min_latency anything else: specified limit
2250 *
2251 * Similar for max_latency */
2252
2253 if (min_latency < ABSOLUTE_MIN_LATENCY)
2254 min_latency = ABSOLUTE_MIN_LATENCY;
2255
2256 if (max_latency <= 0 ||
2257 max_latency > ABSOLUTE_MAX_LATENCY)
2258 max_latency = ABSOLUTE_MAX_LATENCY;
2259
2260 pa_assert(min_latency <= max_latency);
2261
2262 /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
2263 pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
2264 max_latency == ABSOLUTE_MAX_LATENCY) ||
2265 (s->flags & PA_SINK_DYNAMIC_LATENCY));
2266
2267 if (PA_SINK_IS_LINKED(s->state)) {
2268 pa_usec_t r[2];
2269
2270 r[0] = min_latency;
2271 r[1] = max_latency;
2272
2273 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
2274 } else
2275 pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
2276 }
2277
2278 /* Called from main thread */
2279 void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
2280 pa_sink_assert_ref(s);
2281 pa_assert_ctl_context();
2282 pa_assert(min_latency);
2283 pa_assert(max_latency);
2284
2285 if (PA_SINK_IS_LINKED(s->state)) {
2286 pa_usec_t r[2] = { 0, 0 };
2287
2288 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
2289
2290 *min_latency = r[0];
2291 *max_latency = r[1];
2292 } else {
2293 *min_latency = s->thread_info.min_latency;
2294 *max_latency = s->thread_info.max_latency;
2295 }
2296 }
2297
/* Called from IO thread */
/* Apply a new latency range and notify the connected streams and the
 * monitor source about it. */
void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Callers (pa_sink_set_latency_range()) normalize to the absolute
     * bounds before we get here */
    pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
    pa_assert(min_latency <= max_latency);

    /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
    pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
               max_latency == ABSOLUTE_MAX_LATENCY) ||
              (s->flags & PA_SINK_DYNAMIC_LATENCY));

    /* Unchanged range, skip all the notifications below */
    if (s->thread_info.min_latency == min_latency &&
        s->thread_info.max_latency == max_latency)
        return;

    s->thread_info.min_latency = min_latency;
    s->thread_info.max_latency = max_latency;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        /* Tell every connected stream about the new range */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_latency_range)
                i->update_sink_latency_range(i);
    }

    /* The cached requested latency was clamped against the old range */
    pa_sink_invalidate_requested_latency(s, FALSE);

    /* Keep the monitor source's range in sync with ours */
    pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
2332
2333 /* Called from main thread */
2334 void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
2335 pa_sink_assert_ref(s);
2336 pa_assert_ctl_context();
2337
2338 if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
2339 pa_assert(latency == 0);
2340 return;
2341 }
2342
2343 if (latency < ABSOLUTE_MIN_LATENCY)
2344 latency = ABSOLUTE_MIN_LATENCY;
2345
2346 if (latency > ABSOLUTE_MAX_LATENCY)
2347 latency = ABSOLUTE_MAX_LATENCY;
2348
2349 if (PA_SINK_IS_LINKED(s->state))
2350 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
2351 else
2352 s->thread_info.fixed_latency = latency;
2353
2354 pa_source_set_fixed_latency(s->monitor_source, latency);
2355 }
2356
2357 /* Called from main thread */
2358 pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
2359 pa_usec_t latency;
2360
2361 pa_sink_assert_ref(s);
2362 pa_assert_ctl_context();
2363
2364 if (s->flags & PA_SINK_DYNAMIC_LATENCY)
2365 return 0;
2366
2367 if (PA_SINK_IS_LINKED(s->state))
2368 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
2369 else
2370 latency = s->thread_info.fixed_latency;
2371
2372 return latency;
2373 }
2374
/* Called from IO thread */
/* Store the new fixed latency and notify the connected streams and the
 * monitor source about the change. */
void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
    pa_sink_assert_ref(s);
    pa_sink_assert_io_context(s);

    /* Sinks with dynamic latency have no fixed latency; callers must
     * pass 0 for them */
    if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
        pa_assert(latency == 0);
        return;
    }

    /* Callers (pa_sink_set_fixed_latency()) clamp to the absolute
     * bounds before we get here */
    pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
    pa_assert(latency <= ABSOLUTE_MAX_LATENCY);

    /* Unchanged value, skip the notifications below */
    if (s->thread_info.fixed_latency == latency)
        return;

    s->thread_info.fixed_latency = latency;

    if (PA_SINK_IS_LINKED(s->thread_info.state)) {
        pa_sink_input *i;
        void *state = NULL;

        /* Let every connected stream react to the new latency */
        PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
            if (i->update_sink_fixed_latency)
                i->update_sink_fixed_latency(i);
    }

    /* The cached requested latency depends on the fixed latency */
    pa_sink_invalidate_requested_latency(s, FALSE);

    /* Keep the monitor source in sync */
    pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
}
2406
2407 /* Called from main context */
2408 size_t pa_sink_get_max_rewind(pa_sink *s) {
2409 size_t r;
2410 pa_sink_assert_ref(s);
2411 pa_assert_ctl_context();
2412
2413 if (!PA_SINK_IS_LINKED(s->state))
2414 return s->thread_info.max_rewind;
2415
2416 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
2417
2418 return r;
2419 }
2420
2421 /* Called from main context */
2422 size_t pa_sink_get_max_request(pa_sink *s) {
2423 size_t r;
2424 pa_sink_assert_ref(s);
2425 pa_assert_ctl_context();
2426
2427 if (!PA_SINK_IS_LINKED(s->state))
2428 return s->thread_info.max_request;
2429
2430 pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
2431
2432 return r;
2433 }
2434
2435 /* Called from main context */
2436 int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
2437 pa_device_port *port;
2438
2439 pa_sink_assert_ref(s);
2440 pa_assert_ctl_context();
2441
2442 if (!s->set_port) {
2443 pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
2444 return -PA_ERR_NOTIMPLEMENTED;
2445 }
2446
2447 if (!s->ports)
2448 return -PA_ERR_NOENTITY;
2449
2450 if (!(port = pa_hashmap_get(s->ports, name)))
2451 return -PA_ERR_NOENTITY;
2452
2453 if (s->active_port == port) {
2454 s->save_port = s->save_port || save;
2455 return 0;
2456 }
2457
2458 if ((s->set_port(s, port)) < 0)
2459 return -PA_ERR_NOENTITY;
2460
2461 pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
2462
2463 pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
2464
2465 s->active_port = port;
2466 s->save_port = save;
2467
2468 return 0;
2469 }
2470
2471 pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
2472 const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
2473
2474 pa_assert(p);
2475
2476 if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
2477 return TRUE;
2478
2479 if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
2480
2481 if (pa_streq(ff, "microphone"))
2482 t = "audio-input-microphone";
2483 else if (pa_streq(ff, "webcam"))
2484 t = "camera-web";
2485 else if (pa_streq(ff, "computer"))
2486 t = "computer";
2487 else if (pa_streq(ff, "handset"))
2488 t = "phone";
2489 else if (pa_streq(ff, "portable"))
2490 t = "multimedia-player";
2491 else if (pa_streq(ff, "tv"))
2492 t = "video-display";
2493
2494 /*
2495 * The following icons are not part of the icon naming spec,
2496 * because Rodney Dawes sucks as the maintainer of that spec.
2497 *
2498 * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
2499 */
2500 else if (pa_streq(ff, "headset"))
2501 t = "audio-headset";
2502 else if (pa_streq(ff, "headphone"))
2503 t = "audio-headphones";
2504 else if (pa_streq(ff, "speaker"))
2505 t = "audio-speakers";
2506 else if (pa_streq(ff, "hands-free"))
2507 t = "audio-handsfree";
2508 }
2509
2510 if (!t)
2511 if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2512 if (pa_streq(c, "modem"))
2513 t = "modem";
2514
2515 if (!t) {
2516 if (is_sink)
2517 t = "audio-card";
2518 else
2519 t = "audio-input-microphone";
2520 }
2521
2522 if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
2523 if (strstr(profile, "analog"))
2524 s = "-analog";
2525 else if (strstr(profile, "iec958"))
2526 s = "-iec958";
2527 else if (strstr(profile, "hdmi"))
2528 s = "-hdmi";
2529 }
2530
2531 bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
2532
2533 pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
2534
2535 return TRUE;
2536 }
2537
2538 pa_bool_t pa_device_init_description(pa_proplist *p) {
2539 const char *s, *d = NULL, *k;
2540 pa_assert(p);
2541
2542 if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
2543 return TRUE;
2544
2545 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2546 if (pa_streq(s, "internal"))
2547 d = _("Internal Audio");
2548
2549 if (!d)
2550 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
2551 if (pa_streq(s, "modem"))
2552 d = _("Modem");
2553
2554 if (!d)
2555 d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
2556
2557 if (!d)
2558 return FALSE;
2559
2560 k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
2561
2562 if (d && k)
2563 pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
2564 else if (d)
2565 pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
2566
2567 return TRUE;
2568 }
2569
2570 pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
2571 const char *s;
2572 pa_assert(p);
2573
2574 if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
2575 return TRUE;
2576
2577 if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
2578 if (pa_streq(s, "handset") || pa_streq(s, "hands-free")) {
2579 pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
2580 return TRUE;
2581 }
2582
2583 return FALSE;
2584 }