[pulseaudio] / src / modules / module-combine.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5
6 PulseAudio is free software; you can redistribute it and/or modify
7 it under the terms of the GNU Lesser General Public License as published
8 by the Free Software Foundation; either version 2.1 of the License,
9 or (at your option) any later version.
10
11 PulseAudio is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with PulseAudio; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
19 USA.
20 ***/
21
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <stdio.h>
27 #include <errno.h>
28
29 #include <pulse/timeval.h>
30 #include <pulse/xmalloc.h>
31
32 #include <pulsecore/macro.h>
33 #include <pulsecore/module.h>
34 #include <pulsecore/llist.h>
35 #include <pulsecore/sink.h>
36 #include <pulsecore/sink-input.h>
37 #include <pulsecore/memblockq.h>
38 #include <pulsecore/log.h>
39 #include <pulsecore/core-util.h>
40 #include <pulsecore/modargs.h>
41 #include <pulsecore/namereg.h>
42 #include <pulsecore/mutex.h>
43 #include <pulsecore/thread.h>
44 #include <pulsecore/thread-mq.h>
45 #include <pulsecore/rtpoll.h>
46 #include <pulsecore/rtclock.h>
47 #include <pulsecore/core-error.h>
48 #include <pulsecore/time-smoother.h>
49
50 #include "module-combine-symdef.h"
51
52 PA_MODULE_AUTHOR("Lennart Poettering");
53 PA_MODULE_DESCRIPTION("Combine multiple sinks to one");
54 PA_MODULE_VERSION(PACKAGE_VERSION);
55 PA_MODULE_LOAD_ONCE(FALSE);
56 PA_MODULE_USAGE(
57 "sink_name=<name for the sink> "
58 "sink_properties=<properties for the sink> "
59 "slaves=<slave sinks> "
60 "adjust_time=<seconds> "
61 "resample_method=<method> "
62 "format=<sample format> "
63 "rate=<sample rate> "
64 "channels=<number of channels> "
65 "channel_map=<channel map>");
66
67 #define DEFAULT_SINK_NAME "combined"
68
69 #define MEMBLOCKQ_MAXLENGTH (1024*1024*16)
70
71 #define DEFAULT_ADJUST_TIME 10
72
73 #define BLOCK_USEC (PA_USEC_PER_MSEC * 200)
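/* 200 ms of audio per block; at, say, 44100 Hz 16-bit stereo that is
 * 8820 frames * 4 bytes ≈ 35 kB per request. */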
74
75 static const char* const valid_modargs[] = {
76 "sink_name",
77 "sink_properties",
78 "slaves",
79 "adjust_time",
80 "resample_method",
81 "format",
82 "rate",
83 "channels",
84 "channel_map",
85 NULL
86 };
87
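/* One instance per slave sink: the sink input that plays into the slave,
 * the memblockq buffering data for it, and the message queues connecting
 * the slave's I/O thread with the combine sink's I/O thread. */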
88 struct output {
89 struct userdata *userdata;
90
91 pa_sink *sink;
92 pa_sink_input *sink_input;
93
94 pa_asyncmsgq *inq, /* Message queue from the sink thread to this sink input */
95 *outq; /* Message queue from this sink input to the sink thread */
96 pa_rtpoll_item *inq_rtpoll_item_read, *inq_rtpoll_item_write;
97 pa_rtpoll_item *outq_rtpoll_item_read, *outq_rtpoll_item_write;
98
99 pa_memblockq *memblockq;
100
101 pa_usec_t total_latency;
102
103 pa_atomic_t max_request;
104
105 PA_LLIST_FIELDS(struct output);
106 };
107
108 struct userdata {
109 pa_core *core;
110 pa_module *module;
111 pa_sink *sink;
112
113 pa_thread *thread;
114 pa_thread_mq thread_mq;
115 pa_rtpoll *rtpoll;
116
117 pa_time_event *time_event;
118 uint32_t adjust_time;
119
120 pa_bool_t automatic;
121
122 pa_hook_slot *sink_put_slot, *sink_unlink_slot, *sink_state_changed_slot;
123
124 pa_resample_method_t resample_method;
125
126 struct timeval adjust_timestamp;
127
128 pa_usec_t block_usec;
129
130 pa_idxset* outputs; /* managed in main context */
131
132 struct {
133 PA_LLIST_HEAD(struct output, active_outputs); /* managed in IO thread context */
134 pa_atomic_t running; /* we cache that value here, so that every thread can query it cheaply */
135 pa_usec_t timestamp;
136 pa_bool_t in_null_mode;
137 pa_smoother *smoother;
138 uint64_t counter;
139 } thread_info;
140 };
141
142 enum {
143 SINK_MESSAGE_ADD_OUTPUT = PA_SINK_MESSAGE_MAX,
144 SINK_MESSAGE_REMOVE_OUTPUT,
145 SINK_MESSAGE_NEED,
146 SINK_MESSAGE_UPDATE_LATENCY,
147 SINK_MESSAGE_UPDATE_MAX_REQUEST
148 };
149
150 enum {
151 SINK_INPUT_MESSAGE_POST = PA_SINK_INPUT_MESSAGE_MAX,
152 };
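/* Data flow between the combine sink's I/O thread and the slave sinks'
 * I/O threads: when a slave's sink input runs dry it sends
 * SINK_MESSAGE_NEED over its outq; the combine sink thread then renders
 * a chunk, pushes it directly into the requesting output's memblockq and
 * posts it to every other output's inq as SINK_INPUT_MESSAGE_POST. */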
153
154 static void output_free(struct output *o);
155 static int output_create_sink_input(struct output *o);
156
157 static void adjust_rates(struct userdata *u) {
158 struct output *o;
159 pa_usec_t max_sink_latency = 0, min_total_latency = (pa_usec_t) -1, target_latency, avg_total_latency = 0;
160 uint32_t base_rate;
161 uint32_t idx;
162 unsigned n = 0;
163
164 pa_assert(u);
165 pa_sink_assert_ref(u->sink);
166
167 if (pa_idxset_size(u->outputs) <= 0)
168 return;
169
170 if (!PA_SINK_IS_OPENED(pa_sink_get_state(u->sink)))
171 return;
172
173 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
174 pa_usec_t sink_latency;
175
176 if (!o->sink_input || !PA_SINK_IS_OPENED(pa_sink_get_state(o->sink)))
177 continue;
178
179 o->total_latency = pa_sink_input_get_latency(o->sink_input, &sink_latency);
180 o->total_latency += sink_latency;
181
182 if (sink_latency > max_sink_latency)
183 max_sink_latency = sink_latency;
184
185 if (min_total_latency == (pa_usec_t) -1 || o->total_latency < min_total_latency)
186 min_total_latency = o->total_latency;
187
188 avg_total_latency += o->total_latency;
189 n++;
190 }
191
192 if (min_total_latency == (pa_usec_t) -1)
193 return;
194
195 avg_total_latency /= n;
196
197 target_latency = max_sink_latency > min_total_latency ? max_sink_latency : min_total_latency;
198
199 pa_log_info("[%s] avg total latency is %0.2f msec.", u->sink->name, (double) avg_total_latency / PA_USEC_PER_MSEC);
200 pa_log_info("[%s] target latency is %0.2f msec.", u->sink->name, (double) target_latency / PA_USEC_PER_MSEC);
201
202 base_rate = u->sink->sample_spec.rate;
203
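/* Nudge each slave's resampling rate towards the target latency. The
 * adjustment amounts to roughly (latency error / adjust_time) in relative
 * terms: for example, with a base rate of 44100 Hz, a 5 ms latency surplus
 * and adjust_time=10 s the rate is raised by about 0.005/10 * 44100 ≈ 22 Hz.
 * Corrections larger than +/-10 % are rejected below. */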
204 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
205 uint32_t r = base_rate;
206
207 if (!o->sink_input || !PA_SINK_IS_OPENED(pa_sink_get_state(o->sink)))
208 continue;
209
210 if (o->total_latency < target_latency)
211 r -= (uint32_t) ((((double) (target_latency - o->total_latency))/(double)u->adjust_time)*(double)r/PA_USEC_PER_SEC);
212 else if (o->total_latency > target_latency)
213 r += (uint32_t) ((((double) (o->total_latency - target_latency))/(double)u->adjust_time)*(double)r/PA_USEC_PER_SEC);
214
215 if (r < (uint32_t) (base_rate*0.9) || r > (uint32_t) (base_rate*1.1)) {
216 pa_log_warn("[%s] sample rates too different, not adjusting (%u vs. %u).", pa_proplist_gets(o->sink_input->proplist, PA_PROP_MEDIA_NAME), base_rate, r);
217 pa_sink_input_set_rate(o->sink_input, base_rate);
218 } else {
219 pa_log_info("[%s] new rate is %u Hz; ratio is %0.3f; latency is %0.0f usec.", pa_proplist_gets(o->sink_input->proplist, PA_PROP_MEDIA_NAME), r, (double) r / base_rate, (float) o->total_latency);
220 pa_sink_input_set_rate(o->sink_input, r);
221 }
222 }
223
224 pa_asyncmsgq_send(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_UPDATE_LATENCY, NULL, (int64_t) avg_total_latency, NULL);
225 }
226
227 static void time_callback(pa_mainloop_api*a, pa_time_event* e, const struct timeval *tv, void *userdata) {
228 struct userdata *u = userdata;
229 struct timeval n;
230
231 pa_assert(u);
232 pa_assert(a);
233 pa_assert(u->time_event == e);
234
235 adjust_rates(u);
236
237 pa_gettimeofday(&n);
238 n.tv_sec += (time_t) u->adjust_time;
239 u->sink->core->mainloop->time_restart(e, &n);
240 }
241
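/* Render audio from the combined sink and discard it immediately. This keeps
 * client streams and the latency bookkeeping (counter and smoother) moving
 * while no slave outputs are attached. */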
242 static void process_render_null(struct userdata *u, pa_usec_t now) {
243 size_t ate = 0;
244 pa_assert(u);
245
246 if (u->thread_info.in_null_mode)
247 u->thread_info.timestamp = now;
248
249 while (u->thread_info.timestamp < now + u->block_usec) {
250 pa_memchunk chunk;
251
252 pa_sink_render(u->sink, u->sink->thread_info.max_request, &chunk);
253 pa_memblock_unref(chunk.memblock);
254
255 u->thread_info.counter += chunk.length;
256
257 /* pa_log_debug("Ate %lu bytes.", (unsigned long) chunk.length); */
258 u->thread_info.timestamp += pa_bytes_to_usec(chunk.length, &u->sink->sample_spec);
259
260 ate += chunk.length;
261
262 if (ate >= u->sink->thread_info.max_request)
263 break;
264 }
265
266 /* pa_log_debug("Ate in sum %lu bytes (of %lu)", (unsigned long) ate, (unsigned long) nbytes); */
267
268 pa_smoother_put(u->thread_info.smoother, now,
269 pa_bytes_to_usec(u->thread_info.counter, &u->sink->sample_spec) - (u->thread_info.timestamp - now));
270 }
271
272 static void thread_func(void *userdata) {
273 struct userdata *u = userdata;
274
275 pa_assert(u);
276
277 pa_log_debug("Thread starting up");
278
279 if (u->core->realtime_scheduling)
280 pa_make_realtime(u->core->realtime_priority+1);
281
282 pa_thread_mq_install(&u->thread_mq);
283
284 u->thread_info.timestamp = pa_rtclock_usec();
285 u->thread_info.in_null_mode = FALSE;
286
287 for (;;) {
288 int ret;
289
290 if (PA_SINK_IS_OPENED(u->sink->thread_info.state))
291 if (u->sink->thread_info.rewind_requested)
292 pa_sink_process_rewind(u->sink, 0);
293
294 /* If no outputs are connected, render some data and drop it immediately. */
295 if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && !u->thread_info.active_outputs) {
296 pa_usec_t now;
297
298 now = pa_rtclock_usec();
299
300 if (!u->thread_info.in_null_mode || u->thread_info.timestamp <= now)
301 process_render_null(u, now);
302
303 pa_rtpoll_set_timer_absolute(u->rtpoll, u->thread_info.timestamp);
304 u->thread_info.in_null_mode = TRUE;
305 } else {
306 pa_rtpoll_set_timer_disabled(u->rtpoll);
307 u->thread_info.in_null_mode = FALSE;
308 }
309
310 /* Hmm, nothing to do. Let's sleep */
311 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0) {
312 pa_log_info("pa_rtpoll_run() = %i", ret);
313 goto fail;
314 }
315
316 if (ret == 0)
317 goto finish;
318 }
319
320 fail:
321 /* If this was not a regular exit from the loop we have to continue
322 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
323 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
324 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
325
326 finish:
327 pa_log_debug("Thread shutting down");
328 }
329
330 /* Called from I/O thread context */
331 static void render_memblock(struct userdata *u, struct output *o, size_t length) {
332 pa_assert(u);
333 pa_assert(o);
334
335 /* We are run by the sink thread, on behalf of an output (o). The
336 * output is waiting for us, hence it is safe to access its
337 * memblockq and asyncmsgq directly. */
338
339 /* If we are not running, we cannot produce any data */
340 if (!pa_atomic_load(&u->thread_info.running))
341 return;
342
343 /* Maybe there's some data in the requesting output's queue
344 * now? */
345 while (pa_asyncmsgq_process_one(o->inq) > 0)
346 ;
347
348 /* Ok, now let's prepare some data if we really have to */
349 while (!pa_memblockq_is_readable(o->memblockq)) {
350 struct output *j;
351 pa_memchunk chunk;
352
353 /* Render data! */
354 pa_sink_render(u->sink, length, &chunk);
355
356 u->thread_info.counter += chunk.length;
357
358 /* OK, let's send this data to the other threads */
359 for (j = u->thread_info.active_outputs; j; j = j->next)
360
361 /* Send to other outputs, which are not the requesting
362 * one */
363
364 if (j != o)
365 pa_asyncmsgq_post(j->inq, PA_MSGOBJECT(j->sink_input), SINK_INPUT_MESSAGE_POST, NULL, 0, &chunk, NULL);
366
367 /* And place it directly into the requesting output's queue */
368 if (o)
369 pa_memblockq_push_align(o->memblockq, &chunk);
370
371 pa_memblock_unref(chunk.memblock);
372 }
373 }
374
375 /* Called from I/O thread context */
376 static void request_memblock(struct output *o, size_t length) {
377 pa_assert(o);
378 pa_sink_input_assert_ref(o->sink_input);
379 pa_sink_assert_ref(o->userdata->sink);
380
381 /* If another thread already prepared some data, it arrived
382 * over the asyncmsgq, hence let's process it
383 * first. */
384 while (pa_asyncmsgq_process_one(o->inq) > 0)
385 ;
386
387 /* Check whether we're now readable */
388 if (pa_memblockq_is_readable(o->memblockq))
389 return;
390
391 /* OK, we need to prepare new data, but only if the sink is actually running */
392 if (pa_atomic_load(&o->userdata->thread_info.running))
393 pa_asyncmsgq_send(o->outq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_NEED, o, (int64_t) length, NULL);
394 }
395
396 /* Called from I/O thread context */
397 static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
398 struct output *o;
399
400 pa_sink_input_assert_ref(i);
401 pa_assert_se(o = i->userdata);
402
403 /* If necessary, get some new data */
404 request_memblock(o, nbytes);
405
406 if (pa_memblockq_peek(o->memblockq, chunk) < 0)
407 return -1;
408
409 pa_memblockq_drop(o->memblockq, chunk->length);
410 return 0;
411 }
412
413 /* Called from I/O thread context */
414 static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
415 struct output *o;
416
417 pa_sink_input_assert_ref(i);
418 pa_assert_se(o = i->userdata);
419
420 pa_memblockq_rewind(o->memblockq, nbytes);
421 }
422
423 /* Called from I/O thread context */
424 static void sink_input_update_max_rewind_cb(pa_sink_input *i, size_t nbytes) {
425 struct output *o;
426
427 pa_sink_input_assert_ref(i);
428 pa_assert_se(o = i->userdata);
429
430 pa_memblockq_set_maxrewind(o->memblockq, nbytes);
431 }
432
433 /* Called from I/O thread context */
434 static void sink_input_update_max_request_cb(pa_sink_input *i, size_t nbytes) {
435 struct output *o;
436
437 pa_sink_input_assert_ref(i);
438 pa_assert_se(o = i->userdata);
439
440 if (pa_atomic_load(&o->max_request) == (int) nbytes)
441 return;
442
443 pa_atomic_store(&o->max_request, (int) nbytes);
444
445 pa_asyncmsgq_post(o->outq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_UPDATE_MAX_REQUEST, NULL, 0, NULL, NULL);
446 }
447
448 /* Called from I/O thread context */
449 static void sink_input_attach_cb(pa_sink_input *i) {
450 struct output *o;
451
452 pa_sink_input_assert_ref(i);
453 pa_assert_se(o = i->userdata);
454
455 /* Set up the queue from the sink thread to us */
456 pa_assert(!o->inq_rtpoll_item_read && !o->outq_rtpoll_item_write);
457
458 o->inq_rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
459 i->sink->rtpoll,
460 PA_RTPOLL_LATE, /* This one is not that important, since we check for data in _peek() anyway. */
461 o->inq);
462
463 o->outq_rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
464 i->sink->rtpoll,
465 PA_RTPOLL_EARLY,
466 o->outq);
467 }
468
469 /* Called from I/O thread context */
470 static void sink_input_detach_cb(pa_sink_input *i) {
471 struct output *o;
472
473 pa_sink_input_assert_ref(i);
474 pa_assert_se(o = i->userdata);
475
476 /* Shut down the queue from the sink thread to us */
477 pa_assert(o->inq_rtpoll_item_read && o->outq_rtpoll_item_write);
478
479 pa_rtpoll_item_free(o->inq_rtpoll_item_read);
480 o->inq_rtpoll_item_read = NULL;
481
482 pa_rtpoll_item_free(o->outq_rtpoll_item_write);
483 o->outq_rtpoll_item_write = NULL;
484 }
485
486 /* Called from main context */
487 static void sink_input_kill_cb(pa_sink_input *i) {
488 struct output *o;
489
490 pa_sink_input_assert_ref(i);
491 pa_assert_se(o = i->userdata);
492
493 pa_module_unload_request(o->userdata->module, TRUE);
494 output_free(o);
495 }
496
497 /* Called from IO thread context */
498 static void sink_input_state_change_cb(pa_sink_input *i, pa_sink_input_state_t state) {
499 struct userdata *u;
500
501 pa_sink_input_assert_ref(i);
502 pa_assert_se(u = i->userdata);
503
504 /* If we are added for the first time, ask for a rewinding so that
505 * we are heard right away. */
506 if (PA_SINK_INPUT_IS_LINKED(state) &&
507 i->thread_info.state == PA_SINK_INPUT_INIT)
508 pa_sink_input_request_rewind(i, 0, FALSE, TRUE, TRUE);
509 }
510
511 /* Called from thread context */
512 static int sink_input_process_msg(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
513 struct output *o = PA_SINK_INPUT(obj)->userdata;
514
515 switch (code) {
516
517 case PA_SINK_INPUT_MESSAGE_GET_LATENCY: {
518 pa_usec_t *r = data;
519
520 *r = pa_bytes_to_usec(pa_memblockq_get_length(o->memblockq), &o->sink_input->sample_spec);
521
522 /* Fall through, the default handler will add in the extra
523 * latency added by the resampler */
524 break;
525 }
526
527 case SINK_INPUT_MESSAGE_POST:
528
529 if (PA_SINK_IS_OPENED(o->sink_input->sink->thread_info.state))
530 pa_memblockq_push_align(o->memblockq, chunk);
531 else
532 pa_memblockq_flush_write(o->memblockq);
533
534 return 0;
535 }
536
537 return pa_sink_input_process_msg(obj, code, data, offset, chunk);
538 }
539
540 /* Called from main context */
541 static void disable_output(struct output *o) {
542 pa_assert(o);
543
544 if (!o->sink_input)
545 return;
546
547 pa_sink_input_unlink(o->sink_input);
548 pa_asyncmsgq_send(o->userdata->sink->asyncmsgq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_REMOVE_OUTPUT, o, 0, NULL);
549 pa_sink_input_unref(o->sink_input);
550 o->sink_input = NULL;
551 }
552
553 /* Called from main context */
554 static void enable_output(struct output *o) {
555 pa_assert(o);
556
557 if (o->sink_input)
558 return;
559
560 if (output_create_sink_input(o) >= 0) {
561
562 pa_memblockq_flush_write(o->memblockq);
563
564 pa_sink_input_put(o->sink_input);
565
566 if (o->userdata->sink && PA_SINK_IS_LINKED(pa_sink_get_state(o->userdata->sink)))
567 pa_asyncmsgq_send(o->userdata->sink->asyncmsgq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_ADD_OUTPUT, o, 0, NULL);
568 }
569 }
570
571 /* Called from main context */
572 static void suspend(struct userdata *u) {
573 struct output *o;
574 uint32_t idx;
575
576 pa_assert(u);
577
578 /* Let's suspend by unlinking all streams */
579 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
580 disable_output(o);
581
582 pa_log_info("Device suspended...");
583 }
584
585 /* Called from main context */
586 static void unsuspend(struct userdata *u) {
587 struct output *o;
588 uint32_t idx;
589
590 pa_assert(u);
591
592 /* Let's resume */
593 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
594
595 pa_sink_suspend(o->sink, FALSE, PA_SUSPEND_IDLE);
596
597 if (PA_SINK_IS_OPENED(pa_sink_get_state(o->sink)))
598 enable_output(o);
599 }
600
601 pa_log_info("Resumed successfully...");
602 }
603
604 /* Called from main context */
605 static int sink_set_state(pa_sink *sink, pa_sink_state_t state) {
606 struct userdata *u;
607
608 pa_sink_assert_ref(sink);
609 pa_assert_se(u = sink->userdata);
610
611 /* Please note that in contrast to the ALSA modules we call
612 * suspend/unsuspend from main context here! */
613
614 switch (state) {
615 case PA_SINK_SUSPENDED:
616 pa_assert(PA_SINK_IS_OPENED(pa_sink_get_state(u->sink)));
617
618 suspend(u);
619 break;
620
621 case PA_SINK_IDLE:
622 case PA_SINK_RUNNING:
623
624 if (pa_sink_get_state(u->sink) == PA_SINK_SUSPENDED)
625 unsuspend(u);
626
627 break;
628
629 case PA_SINK_UNLINKED:
630 case PA_SINK_INIT:
631 case PA_SINK_INVALID_STATE:
632 ;
633 }
634
635 return 0;
636 }
637
638 /* Called from IO context */
639 static void update_max_request(struct userdata *u) {
640 size_t max_request = 0;
641 struct output *o;
642
643 for (o = u->thread_info.active_outputs; o; o = o->next) {
644 size_t mr = (size_t) pa_atomic_load(&o->max_request);
645
646 if (mr > max_request)
647 max_request = mr;
648 }
649
650 if (max_request <= 0)
651 max_request = pa_usec_to_bytes(u->block_usec, &u->sink->sample_spec);
652
653 pa_sink_set_max_request_within_thread(u->sink, max_request);
654 }
655
656 /* Called from the IO thread context */
657 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
658 struct userdata *u = PA_SINK(o)->userdata;
659
660 switch (code) {
661
662 case PA_SINK_MESSAGE_SET_STATE:
663 pa_atomic_store(&u->thread_info.running, PA_PTR_TO_UINT(data) == PA_SINK_RUNNING);
664
665 if (PA_PTR_TO_UINT(data) == PA_SINK_SUSPENDED)
666 pa_smoother_pause(u->thread_info.smoother, pa_rtclock_usec());
667 else
668 pa_smoother_resume(u->thread_info.smoother, pa_rtclock_usec(), TRUE);
669
670 break;
671
672 case PA_SINK_MESSAGE_GET_LATENCY: {
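/* The sink latency is estimated as the audio rendered so far (counter,
 * converted to usec) minus the smoother's interpolated playback position. */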
673 pa_usec_t x, y, c, *delay = data;
674
675 x = pa_rtclock_usec();
676 y = pa_smoother_get(u->thread_info.smoother, x);
677
678 c = pa_bytes_to_usec(u->thread_info.counter, &u->sink->sample_spec);
679
680 if (y < c)
681 *delay = c - y;
682 else
683 *delay = 0;
684
685 return 0;
686 }
687
688 case SINK_MESSAGE_ADD_OUTPUT: {
689 struct output *op = data;
690
691 PA_LLIST_PREPEND(struct output, u->thread_info.active_outputs, op);
692
693 pa_assert(!op->outq_rtpoll_item_read && !op->inq_rtpoll_item_write);
694
695 op->outq_rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
696 u->rtpoll,
697 PA_RTPOLL_EARLY-1, /* This item is very important */
698 op->outq);
699 op->inq_rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
700 u->rtpoll,
701 PA_RTPOLL_EARLY,
702 op->inq);
703
704 update_max_request(u);
705 return 0;
706 }
707
708 case SINK_MESSAGE_REMOVE_OUTPUT: {
709 struct output *op = data;
710
711 PA_LLIST_REMOVE(struct output, u->thread_info.active_outputs, op);
712
713 pa_assert(op->outq_rtpoll_item_read && op->inq_rtpoll_item_write);
714
715 pa_rtpoll_item_free(op->outq_rtpoll_item_read);
716 op->outq_rtpoll_item_read = NULL;
717
718 pa_rtpoll_item_free(op->inq_rtpoll_item_write);
719 op->inq_rtpoll_item_write = NULL;
720
721 update_max_request(u);
722 return 0;
723 }
724
725 case SINK_MESSAGE_NEED:
726 render_memblock(u, (struct output*) data, (size_t) offset);
727 return 0;
728
729 case SINK_MESSAGE_UPDATE_LATENCY: {
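/* Feed the smoother with an estimate of the playback position: bytes
 * rendered so far, converted to usec, minus the average output latency
 * reported by adjust_rates(). */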
730 pa_usec_t x, y, latency = (pa_usec_t) offset;
731
732 x = pa_rtclock_usec();
733 y = pa_bytes_to_usec(u->thread_info.counter, &u->sink->sample_spec);
734
735 if (y > latency)
736 y -= latency;
737 else
738 y = 0;
739
740 pa_smoother_put(u->thread_info.smoother, x, y);
741 return 0;
742 }
743
744 case SINK_MESSAGE_UPDATE_MAX_REQUEST:
745
746 update_max_request(u);
747 break;
748 }
749
750 return pa_sink_process_msg(o, code, data, offset, chunk);
751 }
752
753 static void update_description(struct userdata *u) {
754 pa_bool_t first = TRUE;
755 char *t;
756 struct output *o;
757 uint32_t idx;
758
759 pa_assert(u);
760
761 if (pa_idxset_isempty(u->outputs)) {
762 pa_sink_set_description(u->sink, "Simultaneous output");
763 return;
764 }
765
766 t = pa_xstrdup("Simultaneous output to");
767
768 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
769 char *e;
770
771 if (first) {
772 e = pa_sprintf_malloc("%s %s", t, pa_strnull(pa_proplist_gets(o->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)));
773 first = FALSE;
774 } else
775 e = pa_sprintf_malloc("%s, %s", t, pa_strnull(pa_proplist_gets(o->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)));
776
777 pa_xfree(t);
778 t = e;
779 }
780
781 pa_sink_set_description(u->sink, t);
782 pa_xfree(t);
783 }
784
785 static int output_create_sink_input(struct output *o) {
786 pa_sink_input_new_data data;
787
788 pa_assert(o);
789
790 if (o->sink_input)
791 return 0;
792
793 pa_sink_input_new_data_init(&data);
794 data.sink = o->sink;
795 data.driver = __FILE__;
796 pa_proplist_setf(data.proplist, PA_PROP_MEDIA_NAME, "Simultaneous output on %s", pa_strnull(pa_proplist_gets(o->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)));
797 pa_proplist_sets(data.proplist, PA_PROP_MEDIA_ROLE, "filter");
798 pa_sink_input_new_data_set_sample_spec(&data, &o->userdata->sink->sample_spec);
799 pa_sink_input_new_data_set_channel_map(&data, &o->userdata->sink->channel_map);
800 data.module = o->userdata->module;
801 data.resample_method = o->userdata->resample_method;
802
803 pa_sink_input_new(&o->sink_input, o->userdata->core, &data, PA_SINK_INPUT_VARIABLE_RATE|PA_SINK_INPUT_DONT_MOVE);
804
805 pa_sink_input_new_data_done(&data);
806
807 if (!o->sink_input)
808 return -1;
809
810 o->sink_input->parent.process_msg = sink_input_process_msg;
811 o->sink_input->pop = sink_input_pop_cb;
812 o->sink_input->process_rewind = sink_input_process_rewind_cb;
813 o->sink_input->state_change = sink_input_state_change_cb;
814 o->sink_input->update_max_rewind = sink_input_update_max_rewind_cb;
815 o->sink_input->update_max_request = sink_input_update_max_request_cb;
816 o->sink_input->attach = sink_input_attach_cb;
817 o->sink_input->detach = sink_input_detach_cb;
818 o->sink_input->kill = sink_input_kill_cb;
819 o->sink_input->userdata = o;
820
821 pa_sink_input_set_requested_latency(o->sink_input, BLOCK_USEC);
822
823 return 0;
824 }
825
826 static struct output *output_new(struct userdata *u, pa_sink *sink) {
827 struct output *o;
828 pa_sink_state_t state;
829
830 pa_assert(u);
831 pa_assert(sink);
832 pa_assert(u->sink);
833
834 o = pa_xnew(struct output, 1);
835 o->userdata = u;
836 o->inq = pa_asyncmsgq_new(0);
837 o->outq = pa_asyncmsgq_new(0);
838 o->inq_rtpoll_item_write = o->inq_rtpoll_item_read = NULL;
839 o->outq_rtpoll_item_write = o->outq_rtpoll_item_read = NULL;
840 o->sink = sink;
841 o->sink_input = NULL;
842 o->memblockq = pa_memblockq_new(
843 0,
844 MEMBLOCKQ_MAXLENGTH,
845 MEMBLOCKQ_MAXLENGTH,
846 pa_frame_size(&u->sink->sample_spec),
847 1,
848 0,
849 0,
850 NULL);
851 pa_atomic_store(&o->max_request, 0);
852 PA_LLIST_INIT(struct output, o);
853
854 pa_assert_se(pa_idxset_put(u->outputs, o, NULL) == 0);
855
856 state = pa_sink_get_state(u->sink);
857
858 if (state != PA_SINK_INIT)
859 pa_asyncmsgq_send(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_ADD_OUTPUT, o, 0, NULL);
860 else {
861 /* If the sink is not yet started, we need to do the activation ourselves */
862 PA_LLIST_PREPEND(struct output, u->thread_info.active_outputs, o);
863
864 o->outq_rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
865 u->rtpoll,
866 PA_RTPOLL_EARLY-1, /* This item is very important */
867 o->outq);
868 o->inq_rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
869 u->rtpoll,
870 PA_RTPOLL_EARLY,
871 o->inq);
872 }
873
874 if (PA_SINK_IS_OPENED(state) || state == PA_SINK_INIT) {
875 pa_sink_suspend(sink, FALSE, PA_SUSPEND_IDLE);
876
877 if (PA_SINK_IS_OPENED(pa_sink_get_state(sink)))
878 if (output_create_sink_input(o) < 0)
879 goto fail;
880 }
881
882 update_description(u);
883
884 return o;
885
886 fail:
887
888 if (o) {
889 pa_idxset_remove_by_data(u->outputs, o, NULL);
890
891 if (o->sink_input) {
892 pa_sink_input_unlink(o->sink_input);
893 pa_sink_input_unref(o->sink_input);
894 }
895
896 if (o->memblockq)
897 pa_memblockq_free(o->memblockq);
898
899 if (o->inq)
900 pa_asyncmsgq_unref(o->inq);
901
902 if (o->outq)
903 pa_asyncmsgq_unref(o->outq);
904
905 pa_xfree(o);
906 }
907
908 return NULL;
909 }
910
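/* Automatic mode only picks up real hardware sinks (device.class "sound")
 * other than the combine sink itself. */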
911 static pa_bool_t is_suitable_sink(struct userdata *u, pa_sink *s) {
912 const char *t;
913
914 pa_sink_assert_ref(s);
915
916 if (!(s->flags & PA_SINK_HARDWARE))
917 return FALSE;
918
919 if (s == u->sink)
920 return FALSE;
921
922 if ((t = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_CLASS)))
923 if (strcmp(t, "sound"))
924 return FALSE;
925
926 return TRUE;
927 }
928
929 static pa_hook_result_t sink_put_hook_cb(pa_core *c, pa_sink *s, struct userdata* u) {
930 struct output *o;
931
932 pa_core_assert_ref(c);
933 pa_sink_assert_ref(s);
934 pa_assert(u);
935 pa_assert(u->automatic);
936
937 if (!is_suitable_sink(u, s))
938 return PA_HOOK_OK;
939
940 pa_log_info("Configuring new sink: %s", s->name);
941
942 if (!(o = output_new(u, s))) {
943 pa_log("Failed to create sink input on sink '%s'.", s->name);
944 return PA_HOOK_OK;
945 }
946
947 if (o->sink_input)
948 pa_sink_input_put(o->sink_input);
949
950 return PA_HOOK_OK;
951 }
952
953 static struct output* find_output(struct userdata *u, pa_sink *s) {
954 struct output *o;
955 uint32_t idx;
956
957 pa_assert(u);
958 pa_assert(s);
959
960 if (u->sink == s)
961 return NULL;
962
963 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
964 if (o->sink == s)
965 return o;
966
967 return NULL;
968 }
969
970 static pa_hook_result_t sink_unlink_hook_cb(pa_core *c, pa_sink *s, struct userdata* u) {
971 struct output *o;
972
973 pa_assert(c);
974 pa_sink_assert_ref(s);
975 pa_assert(u);
976
977 if (!(o = find_output(u, s)))
978 return PA_HOOK_OK;
979
980 pa_log_info("Unconfiguring sink: %s", s->name);
981
982 output_free(o);
983
984 return PA_HOOK_OK;
985 }
986
987 static pa_hook_result_t sink_state_changed_hook_cb(pa_core *c, pa_sink *s, struct userdata* u) {
988 struct output *o;
989 pa_sink_state_t state;
990
991 if (!(o = find_output(u, s)))
992 return PA_HOOK_OK;
993
994 state = pa_sink_get_state(s);
995
996 if (PA_SINK_IS_OPENED(state) && PA_SINK_IS_OPENED(pa_sink_get_state(u->sink)) && !o->sink_input)
997 enable_output(o);
998
999 if (state == PA_SINK_SUSPENDED && o->sink_input)
1000 disable_output(o);
1001
1002 return PA_HOOK_OK;
1003 }
1004
1005 int pa__init(pa_module*m) {
1006 struct userdata *u;
1007 pa_modargs *ma = NULL;
1008 const char *slaves, *rm;
1009 int resample_method = PA_RESAMPLER_TRIVIAL;
1010 pa_sample_spec ss;
1011 pa_channel_map map;
1012 struct output *o;
1013 uint32_t idx;
1014 pa_sink_new_data data;
1015
1016 pa_assert(m);
1017
1018 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1019 pa_log("failed to parse module arguments");
1020 goto fail;
1021 }
1022
1023 if ((rm = pa_modargs_get_value(ma, "resample_method", NULL))) {
1024 if ((resample_method = pa_parse_resample_method(rm)) < 0) {
1025 pa_log("invalid resample method '%s'", rm);
1026 goto fail;
1027 }
1028 }
1029
1030 m->userdata = u = pa_xnew(struct userdata, 1);
1031 u->core = m->core;
1032 u->module = m;
1033 u->sink = NULL;
1034 u->time_event = NULL;
1035 u->adjust_time = DEFAULT_ADJUST_TIME;
1036 u->rtpoll = pa_rtpoll_new();
1037 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1038 u->thread = NULL;
1039 u->resample_method = resample_method;
1040 u->outputs = pa_idxset_new(NULL, NULL);
1041 memset(&u->adjust_timestamp, 0, sizeof(u->adjust_timestamp));
1042 u->sink_put_slot = u->sink_unlink_slot = u->sink_state_changed_slot = NULL;
1043 PA_LLIST_HEAD_INIT(struct output, u->thread_info.active_outputs);
1044 pa_atomic_store(&u->thread_info.running, FALSE);
1045 u->thread_info.in_null_mode = FALSE;
1046 u->thread_info.counter = 0;
1047 u->thread_info.smoother = pa_smoother_new(
1048 PA_USEC_PER_SEC,
1049 PA_USEC_PER_SEC*2,
1050 TRUE,
1051 TRUE,
1052 10,
1053 0,
1054 FALSE);
1055
1056 if (pa_modargs_get_value_u32(ma, "adjust_time", &u->adjust_time) < 0) {
1057 pa_log("Failed to parse adjust_time value");
1058 goto fail;
1059 }
1060
1061 slaves = pa_modargs_get_value(ma, "slaves", NULL);
1062 u->automatic = !slaves;
1063
1064 ss = m->core->default_sample_spec;
1065 map = m->core->default_channel_map;
1066 if ((pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_DEFAULT) < 0)) {
1067 pa_log("Invalid sample specification.");
1068 goto fail;
1069 }
1070
1071 pa_sink_new_data_init(&data);
1072 data.namereg_fail = FALSE;
1073 data.driver = __FILE__;
1074 data.module = m;
1075 pa_sink_new_data_set_name(&data, pa_modargs_get_value(ma, "sink_name", DEFAULT_SINK_NAME));
1076 pa_sink_new_data_set_sample_spec(&data, &ss);
1077 pa_sink_new_data_set_channel_map(&data, &map);
1078 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Simultaneous Output");
1079 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1080
1081 if (slaves)
1082 pa_proplist_sets(data.proplist, "combine.slaves", slaves);
1083
1084 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1085 pa_log("Invalid properties");
1086 pa_sink_new_data_done(&data);
1087 goto fail;
1088 }
1089
1090 u->sink = pa_sink_new(m->core, &data, PA_SINK_LATENCY);
1091 pa_sink_new_data_done(&data);
1092
1093 if (!u->sink) {
1094 pa_log("Failed to create sink");
1095 goto fail;
1096 }
1097
1098 u->sink->parent.process_msg = sink_process_msg;
1099 u->sink->set_state = sink_set_state;
1100 u->sink->userdata = u;
1101
1102 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1103 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1104
1105 u->block_usec = BLOCK_USEC;
1106 pa_sink_set_max_request(u->sink, pa_usec_to_bytes(u->block_usec, &u->sink->sample_spec));
1107
1108 if (!u->automatic) {
1109 const char*split_state;
1110 char *n = NULL;
1111 pa_assert(slaves);
1112
1113 /* The slaves have been specified manually */
1114
1115 split_state = NULL;
1116 while ((n = pa_split(slaves, ",", &split_state))) {
1117 pa_sink *slave_sink;
1118
1119 if (!(slave_sink = pa_namereg_get(m->core, n, PA_NAMEREG_SINK)) || slave_sink == u->sink) {
1120 pa_log("Invalid slave sink '%s'", n);
1121 pa_xfree(n);
1122 goto fail;
1123 }
1124
1125 pa_xfree(n);
1126
1127 if (!output_new(u, slave_sink)) {
1128 pa_log("Failed to create slave sink input on sink '%s'.", slave_sink->name);
1129 goto fail;
1130 }
1131 }
1132
1133 if (pa_idxset_size(u->outputs) <= 1)
1134 pa_log_warn("No slave sinks specified.");
1135
1136 u->sink_put_slot = NULL;
1137
1138 } else {
1139 pa_sink *s;
1140
1141 /* We're in automatic mode, so we add every sink that matches our needs */
1142
1143 for (s = pa_idxset_first(m->core->sinks, &idx); s; s = pa_idxset_next(m->core->sinks, &idx)) {
1144
1145 if (!is_suitable_sink(u, s))
1146 continue;
1147
1148 if (!output_new(u, s)) {
1149 pa_log("Failed to create sink input on sink '%s'.", s->name);
1150 goto fail;
1151 }
1152 }
1153
1154 u->sink_put_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_PUT], PA_HOOK_LATE, (pa_hook_cb_t) sink_put_hook_cb, u);
1155 }
1156
1157 u->sink_unlink_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_UNLINK], PA_HOOK_EARLY, (pa_hook_cb_t) sink_unlink_hook_cb, u);
1158 u->sink_state_changed_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], PA_HOOK_NORMAL, (pa_hook_cb_t) sink_state_changed_hook_cb, u);
1159
1160 if (!(u->thread = pa_thread_new(thread_func, u))) {
1161 pa_log("Failed to create thread.");
1162 goto fail;
1163 }
1164
1165 /* Activate the sink and the sink inputs */
1166 pa_sink_put(u->sink);
1167
1168 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
1169 if (o->sink_input)
1170 pa_sink_input_put(o->sink_input);
1171
1172 if (u->adjust_time > 0) {
1173 struct timeval tv;
1174 pa_gettimeofday(&tv);
1175 tv.tv_sec += (time_t) u->adjust_time;
1176 u->time_event = m->core->mainloop->time_new(m->core->mainloop, &tv, time_callback, u);
1177 }
1178
1179 pa_modargs_free(ma);
1180
1181 return 0;
1182
1183 fail:
1184
1185 if (ma)
1186 pa_modargs_free(ma);
1187
1188 pa__done(m);
1189
1190 return -1;
1191 }
1192
1193 static void output_free(struct output *o) {
1194 pa_assert(o);
1195
1196 disable_output(o);
1197
1198 pa_assert_se(pa_idxset_remove_by_data(o->userdata->outputs, o, NULL));
1199
1200 update_description(o->userdata);
1201
1202 if (o->inq_rtpoll_item_read)
1203 pa_rtpoll_item_free(o->inq_rtpoll_item_read);
1204 if (o->inq_rtpoll_item_write)
1205 pa_rtpoll_item_free(o->inq_rtpoll_item_write);
1206
1207 if (o->outq_rtpoll_item_read)
1208 pa_rtpoll_item_free(o->outq_rtpoll_item_read);
1209 if (o->outq_rtpoll_item_write)
1210 pa_rtpoll_item_free(o->outq_rtpoll_item_write);
1211
1212 if (o->inq)
1213 pa_asyncmsgq_unref(o->inq);
1214
1215 if (o->outq)
1216 pa_asyncmsgq_unref(o->outq);
1217
1218 if (o->memblockq)
1219 pa_memblockq_free(o->memblockq);
1220
1221 pa_xfree(o);
1222 }
1223
1224 void pa__done(pa_module*m) {
1225 struct userdata *u;
1226 struct output *o;
1227
1228 pa_assert(m);
1229
1230 if (!(u = m->userdata))
1231 return;
1232
1233 if (u->sink_put_slot)
1234 pa_hook_slot_free(u->sink_put_slot);
1235
1236 if (u->sink_unlink_slot)
1237 pa_hook_slot_free(u->sink_unlink_slot);
1238
1239 if (u->sink_state_changed_slot)
1240 pa_hook_slot_free(u->sink_state_changed_slot);
1241
1242 if (u->outputs) {
1243 while ((o = pa_idxset_first(u->outputs, NULL)))
1244 output_free(o);
1245
1246 pa_idxset_free(u->outputs, NULL, NULL);
1247 }
1248
1249 if (u->sink)
1250 pa_sink_unlink(u->sink);
1251
1252 if (u->thread) {
1253 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1254 pa_thread_free(u->thread);
1255 }
1256
1257 pa_thread_mq_done(&u->thread_mq);
1258
1259 if (u->sink)
1260 pa_sink_unref(u->sink);
1261
1262 if (u->rtpoll)
1263 pa_rtpoll_free(u->rtpoll);
1264
1265 if (u->time_event)
1266 u->core->mainloop->time_free(u->time_event);
1267
1268 if (u->thread_info.smoother)
1269 pa_smoother_free(u->thread_info.smoother);
1270
1271 pa_xfree(u);
1272 }