/***
  This file is part of PulseAudio.

  Copyright 2004-2008 Lennart Poettering

  PulseAudio is free software; you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License as published
  by the Free Software Foundation; either version 2.1 of the License,
  or (at your option) any later version.

  PulseAudio is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with PulseAudio; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
***/

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#include <errno.h>

#include <pulse/timeval.h>
#include <pulse/xmalloc.h>

#include <pulsecore/macro.h>
#include <pulsecore/module.h>
#include <pulsecore/llist.h>
#include <pulsecore/sink.h>
#include <pulsecore/sink-input.h>
#include <pulsecore/memblockq.h>
#include <pulsecore/log.h>
#include <pulsecore/core-util.h>
#include <pulsecore/modargs.h>
#include <pulsecore/namereg.h>
#include <pulsecore/mutex.h>
#include <pulsecore/thread.h>
#include <pulsecore/thread-mq.h>
#include <pulsecore/rtpoll.h>
#include <pulsecore/rtclock.h>
#include <pulsecore/core-error.h>
#include <pulsecore/time-smoother.h>

#include "module-combine-symdef.h"

PA_MODULE_AUTHOR("Lennart Poettering");
PA_MODULE_DESCRIPTION("Combine multiple sinks to one");
PA_MODULE_VERSION(PACKAGE_VERSION);
PA_MODULE_LOAD_ONCE(FALSE);
PA_MODULE_USAGE(
        "sink_name=<name for the sink> "
        "sink_properties=<properties for the sink> "
        "slaves=<slave sinks> "
        "adjust_time=<seconds> "
        "resample_method=<method> "
        "format=<sample format> "
        "rate=<sample rate> "
        "channels=<number of channels> "
        "channel_map=<channel map>");
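
/* Illustrative example of loading this module (the slave sink names below are
 * placeholders, not part of this file):
 *
 *   load-module module-combine sink_name=combined slaves=sink_a,sink_b adjust_time=10
 */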

#define DEFAULT_SINK_NAME "combined"

#define MEMBLOCKQ_MAXLENGTH (1024*1024*16)

#define DEFAULT_ADJUST_TIME 10

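/* How much audio to buffer/request per iteration by default: this value is
 * used as the sink's initial max_request and as the requested latency of the
 * per-output sink inputs. */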
#define BLOCK_USEC (PA_USEC_PER_MSEC * 200)

static const char* const valid_modargs[] = {
    "sink_name",
    "sink_properties",
    "slaves",
    "adjust_time",
    "resample_method",
    "format",
    "rate",
    "channels",
    "channel_map",
    NULL
};

struct output {
    struct userdata *userdata;

    pa_sink *sink;
    pa_sink_input *sink_input;

    pa_asyncmsgq *inq,    /* Message queue from the sink thread to this sink input */
                 *outq;   /* Message queue from this sink input to the sink thread */
    pa_rtpoll_item *inq_rtpoll_item_read, *inq_rtpoll_item_write;
    pa_rtpoll_item *outq_rtpoll_item_read, *outq_rtpoll_item_write;

    pa_memblockq *memblockq;

    pa_usec_t total_latency;

    pa_atomic_t max_request;

    PA_LLIST_FIELDS(struct output);
};

struct userdata {
    pa_core *core;
    pa_module *module;
    pa_sink *sink;

    pa_thread *thread;
    pa_thread_mq thread_mq;
    pa_rtpoll *rtpoll;

    pa_time_event *time_event;
    uint32_t adjust_time;

    pa_bool_t automatic;

    pa_hook_slot *sink_put_slot, *sink_unlink_slot, *sink_state_changed_slot;

    pa_resample_method_t resample_method;

    struct timeval adjust_timestamp;

    pa_usec_t block_usec;

    pa_idxset* outputs; /* managed in main context */

    struct {
        PA_LLIST_HEAD(struct output, active_outputs); /* managed in IO thread context */
        pa_atomic_t running;  /* we cache that value here, so that every thread can query it cheaply */
        pa_usec_t timestamp;
        pa_bool_t in_null_mode;
        pa_smoother *smoother;
        uint64_t counter;
    } thread_info;
};

enum {
    SINK_MESSAGE_ADD_OUTPUT = PA_SINK_MESSAGE_MAX,
    SINK_MESSAGE_REMOVE_OUTPUT,
    SINK_MESSAGE_NEED,
    SINK_MESSAGE_UPDATE_LATENCY,
    SINK_MESSAGE_UPDATE_MAX_REQUEST
};

enum {
    SINK_INPUT_MESSAGE_POST = PA_SINK_INPUT_MESSAGE_MAX,
};

static void output_free(struct output *o);
static int output_create_sink_input(struct output *o);

static void adjust_rates(struct userdata *u) {
    struct output *o;
    pa_usec_t max_sink_latency = 0, min_total_latency = (pa_usec_t) -1, target_latency, avg_total_latency = 0;
    uint32_t base_rate;
    uint32_t idx;
    unsigned n = 0;

    pa_assert(u);
    pa_sink_assert_ref(u->sink);

    if (pa_idxset_size(u->outputs) <= 0)
        return;

    if (!PA_SINK_IS_OPENED(pa_sink_get_state(u->sink)))
        return;

    for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
        pa_usec_t sink_latency;

        if (!o->sink_input || !PA_SINK_IS_OPENED(pa_sink_get_state(o->sink)))
            continue;

        o->total_latency = pa_sink_input_get_latency(o->sink_input, &sink_latency);
        o->total_latency += sink_latency;

        if (sink_latency > max_sink_latency)
            max_sink_latency = sink_latency;

        if (min_total_latency == (pa_usec_t) -1 || o->total_latency < min_total_latency)
            min_total_latency = o->total_latency;

        avg_total_latency += o->total_latency;
        n++;
    }

    if (min_total_latency == (pa_usec_t) -1)
        return;

    avg_total_latency /= n;

    target_latency = max_sink_latency > min_total_latency ? max_sink_latency : min_total_latency;

    pa_log_info("[%s] avg total latency is %0.2f msec.", u->sink->name, (double) avg_total_latency / PA_USEC_PER_MSEC);
    pa_log_info("[%s] target latency is %0.2f msec.", u->sink->name, (double) target_latency / PA_USEC_PER_MSEC);

    base_rate = u->sink->sample_spec.rate;

    for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
        uint32_t r = base_rate;

        if (!o->sink_input || !PA_SINK_IS_OPENED(pa_sink_get_state(o->sink)))
            continue;

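        /* Nudge this output's resampling rate so that its total latency
         * drifts towards the target over roughly adjust_time seconds: the
         * correction is (latency difference / adjust_time), i.e. a fraction
         * of a second per second, scaled to the base rate. */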
        if (o->total_latency < target_latency)
            r -= (uint32_t) ((((double) (target_latency - o->total_latency))/(double)u->adjust_time)*(double)r/PA_USEC_PER_SEC);
        else if (o->total_latency > target_latency)
            r += (uint32_t) ((((double) (o->total_latency - target_latency))/(double)u->adjust_time)*(double)r/PA_USEC_PER_SEC);

        if (r < (uint32_t) (base_rate*0.9) || r > (uint32_t) (base_rate*1.1)) {
            pa_log_warn("[%s] sample rates too different, not adjusting (%u vs. %u).", pa_proplist_gets(o->sink_input->proplist, PA_PROP_MEDIA_NAME), base_rate, r);
            pa_sink_input_set_rate(o->sink_input, base_rate);
        } else {
            pa_log_info("[%s] new rate is %u Hz; ratio is %0.3f; latency is %0.0f usec.", pa_proplist_gets(o->sink_input->proplist, PA_PROP_MEDIA_NAME), r, (double) r / base_rate, (float) o->total_latency);
            pa_sink_input_set_rate(o->sink_input, r);
        }
    }

    pa_asyncmsgq_send(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_UPDATE_LATENCY, NULL, (int64_t) avg_total_latency, NULL);
}

static void time_callback(pa_mainloop_api*a, pa_time_event* e, const struct timeval *tv, void *userdata) {
    struct userdata *u = userdata;
    struct timeval n;

    pa_assert(u);
    pa_assert(a);
    pa_assert(u->time_event == e);

    adjust_rates(u);

    pa_gettimeofday(&n);
    n.tv_sec += (time_t) u->adjust_time;
    u->sink->core->mainloop->time_restart(e, &n);
}

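/* Called from the IO thread: render audio and throw it away, advancing a fake
 * playback clock, so that the sink (and its clients) keeps running even while
 * no real output is connected. */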
static void process_render_null(struct userdata *u, pa_usec_t now) {
    size_t ate = 0;
    pa_assert(u);

    if (u->thread_info.in_null_mode)
        u->thread_info.timestamp = now;

    while (u->thread_info.timestamp < now + u->block_usec) {
        pa_memchunk chunk;

        pa_sink_render(u->sink, u->sink->thread_info.max_request, &chunk);
        pa_memblock_unref(chunk.memblock);

        u->thread_info.counter += chunk.length;

        /* pa_log_debug("Ate %lu bytes.", (unsigned long) chunk.length); */
        u->thread_info.timestamp += pa_bytes_to_usec(chunk.length, &u->sink->sample_spec);

        ate += chunk.length;

        if (ate >= u->sink->thread_info.max_request)
            break;
    }

    /* pa_log_debug("Ate in sum %lu bytes (of %lu)", (unsigned long) ate, (unsigned long) nbytes); */

    pa_smoother_put(u->thread_info.smoother, now,
                    pa_bytes_to_usec(u->thread_info.counter, &u->sink->sample_spec) - (u->thread_info.timestamp - now));
}

static void thread_func(void *userdata) {
    struct userdata *u = userdata;

    pa_assert(u);

    pa_log_debug("Thread starting up");

    if (u->core->realtime_scheduling)
        pa_make_realtime(u->core->realtime_priority+1);

    pa_thread_mq_install(&u->thread_mq);
    pa_rtpoll_install(u->rtpoll);

    u->thread_info.timestamp = pa_rtclock_usec();
    u->thread_info.in_null_mode = FALSE;

    for (;;) {
        int ret;

        if (PA_SINK_IS_OPENED(u->sink->thread_info.state))
            if (u->sink->thread_info.rewind_requested)
                pa_sink_process_rewind(u->sink, 0);

        /* If no outputs are connected, render some data and drop it immediately. */
        if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && !u->thread_info.active_outputs) {
            pa_usec_t now;

            now = pa_rtclock_usec();

            if (!u->thread_info.in_null_mode || u->thread_info.timestamp <= now)
                process_render_null(u, now);

            pa_rtpoll_set_timer_absolute(u->rtpoll, u->thread_info.timestamp);
            u->thread_info.in_null_mode = TRUE;
        } else {
            pa_rtpoll_set_timer_disabled(u->rtpoll);
            u->thread_info.in_null_mode = FALSE;
        }

        /* Hmm, nothing to do. Let's sleep */
        if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0) {
            pa_log_info("pa_rtpoll_run() = %i", ret);
            goto fail;
        }

        if (ret == 0)
            goto finish;
    }

fail:
    /* If this was not a regular exit from the loop we have to continue
     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
    pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
    pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);

finish:
    pa_log_debug("Thread shutting down");
}

/* Called from I/O thread context */
static void render_memblock(struct userdata *u, struct output *o, size_t length) {
    pa_assert(u);
    pa_assert(o);

    /* We are run by the sink thread, on behalf of an output (o). The
     * output is waiting for us, hence it is safe to access its
     * memblockq and asyncmsgq directly. */

    /* If we are not running, we cannot produce any data */
    if (!pa_atomic_load(&u->thread_info.running))
        return;

    /* Maybe there's some data in the requesting output's queue
     * now? */
    while (pa_asyncmsgq_process_one(o->inq) > 0)
        ;

    /* Ok, now let's prepare some data if we really have to */
    while (!pa_memblockq_is_readable(o->memblockq)) {
        struct output *j;
        pa_memchunk chunk;

        /* Render data! */
        pa_sink_render(u->sink, length, &chunk);

        u->thread_info.counter += chunk.length;

        /* OK, let's send this data to the other threads */
        for (j = u->thread_info.active_outputs; j; j = j->next)

            /* Send to other outputs, which are not the requesting
             * one */

            if (j != o)
                pa_asyncmsgq_post(j->inq, PA_MSGOBJECT(j->sink_input), SINK_INPUT_MESSAGE_POST, NULL, 0, &chunk, NULL);

        /* And place it directly into the requesting output's queue */
        if (o)
            pa_memblockq_push_align(o->memblockq, &chunk);

        pa_memblock_unref(chunk.memblock);
    }
}

/* Called from I/O thread context */
static void request_memblock(struct output *o, size_t length) {
    pa_assert(o);
    pa_sink_input_assert_ref(o->sink_input);
    pa_sink_assert_ref(o->userdata->sink);

    /* If another thread already prepared some data, we received it
     * over the asyncmsgq, hence let's process it first. */
    while (pa_asyncmsgq_process_one(o->inq) > 0)
        ;

    /* Check whether we're now readable */
    if (pa_memblockq_is_readable(o->memblockq))
        return;

    /* OK, we need to prepare new data, but only if the sink is actually running */
    if (pa_atomic_load(&o->userdata->thread_info.running))
        pa_asyncmsgq_send(o->outq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_NEED, o, (int64_t) length, NULL);
}

/* Called from I/O thread context */
static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
    struct output *o;

    pa_sink_input_assert_ref(i);
    pa_assert_se(o = i->userdata);

    /* If necessary, get some new data */
    request_memblock(o, nbytes);

    if (pa_memblockq_peek(o->memblockq, chunk) < 0)
        return -1;

    pa_memblockq_drop(o->memblockq, chunk->length);
    return 0;
}

/* Called from I/O thread context */
static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct output *o;

    pa_sink_input_assert_ref(i);
    pa_assert_se(o = i->userdata);

    pa_memblockq_rewind(o->memblockq, nbytes);
}

/* Called from I/O thread context */
static void sink_input_update_max_rewind_cb(pa_sink_input *i, size_t nbytes) {
    struct output *o;

    pa_sink_input_assert_ref(i);
    pa_assert_se(o = i->userdata);

    pa_memblockq_set_maxrewind(o->memblockq, nbytes);
}

/* Called from I/O thread context */
static void sink_input_update_max_request_cb(pa_sink_input *i, size_t nbytes) {
    struct output *o;

    pa_sink_input_assert_ref(i);
    pa_assert_se(o = i->userdata);

    if (pa_atomic_load(&o->max_request) == (int) nbytes)
        return;

    pa_atomic_store(&o->max_request, (int) nbytes);

    pa_asyncmsgq_post(o->outq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_UPDATE_MAX_REQUEST, NULL, 0, NULL, NULL);
}

/* Called from I/O thread context */
static void sink_input_attach_cb(pa_sink_input *i) {
    struct output *o;

    pa_sink_input_assert_ref(i);
    pa_assert_se(o = i->userdata);

    /* Set up the queue from the sink thread to us */
    pa_assert(!o->inq_rtpoll_item_read && !o->outq_rtpoll_item_write);

    o->inq_rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
            i->sink->rtpoll,
            PA_RTPOLL_LATE,  /* This one is not that important, since we check for data in _peek() anyway. */
            o->inq);

    o->outq_rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
            i->sink->rtpoll,
            PA_RTPOLL_EARLY,
            o->outq);
}

/* Called from I/O thread context */
static void sink_input_detach_cb(pa_sink_input *i) {
    struct output *o;

    pa_sink_input_assert_ref(i);
    pa_assert_se(o = i->userdata);

    /* Shut down the queue from the sink thread to us */
    pa_assert(o->inq_rtpoll_item_read && o->outq_rtpoll_item_write);

    pa_rtpoll_item_free(o->inq_rtpoll_item_read);
    o->inq_rtpoll_item_read = NULL;

    pa_rtpoll_item_free(o->outq_rtpoll_item_write);
    o->outq_rtpoll_item_write = NULL;
}

/* Called from main context */
static void sink_input_kill_cb(pa_sink_input *i) {
    struct output *o;

    pa_sink_input_assert_ref(i);
    pa_assert_se(o = i->userdata);

    pa_module_unload_request(o->userdata->module, TRUE);
    output_free(o);
}

/* Called from IO thread context */
static void sink_input_state_change_cb(pa_sink_input *i, pa_sink_input_state_t state) {
    struct output *o;

    pa_sink_input_assert_ref(i);
    pa_assert_se(o = i->userdata);

    /* If we are added for the first time, ask for a rewind so that
     * we are heard right away. */
    if (PA_SINK_INPUT_IS_LINKED(state) &&
        i->thread_info.state == PA_SINK_INPUT_INIT)
        pa_sink_input_request_rewind(i, 0, FALSE, TRUE, TRUE);
}

/* Called from thread context */
static int sink_input_process_msg(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct output *o = PA_SINK_INPUT(obj)->userdata;

    switch (code) {

        case PA_SINK_INPUT_MESSAGE_GET_LATENCY: {
            pa_usec_t *r = data;

            *r = pa_bytes_to_usec(pa_memblockq_get_length(o->memblockq), &o->sink_input->sample_spec);

            /* Fall through, the default handler will add in the extra
             * latency added by the resampler */
            break;
        }

        case SINK_INPUT_MESSAGE_POST:

            if (PA_SINK_IS_OPENED(o->sink_input->sink->thread_info.state))
                pa_memblockq_push_align(o->memblockq, chunk);
            else
                pa_memblockq_flush_write(o->memblockq);

            return 0;
    }

    return pa_sink_input_process_msg(obj, code, data, offset, chunk);
}

/* Called from main context */
static void disable_output(struct output *o) {
    pa_assert(o);

    if (!o->sink_input)
        return;

    pa_sink_input_unlink(o->sink_input);
    pa_asyncmsgq_send(o->userdata->sink->asyncmsgq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_REMOVE_OUTPUT, o, 0, NULL);
    pa_sink_input_unref(o->sink_input);
    o->sink_input = NULL;
}

/* Called from main context */
static void enable_output(struct output *o) {
    pa_assert(o);

    if (o->sink_input)
        return;

    if (output_create_sink_input(o) >= 0) {

        pa_memblockq_flush_write(o->memblockq);

        pa_sink_input_put(o->sink_input);

        if (o->userdata->sink && PA_SINK_IS_LINKED(pa_sink_get_state(o->userdata->sink)))
            pa_asyncmsgq_send(o->userdata->sink->asyncmsgq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_ADD_OUTPUT, o, 0, NULL);
    }
}

/* Called from main context */
static void suspend(struct userdata *u) {
    struct output *o;
    uint32_t idx;

    pa_assert(u);

    /* Let's suspend by unlinking all streams */
    for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
        disable_output(o);

    pa_log_info("Device suspended...");
}

/* Called from main context */
static void unsuspend(struct userdata *u) {
    struct output *o;
    uint32_t idx;

    pa_assert(u);

    /* Let's resume */
    for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {

        pa_sink_suspend(o->sink, FALSE);

        if (PA_SINK_IS_OPENED(pa_sink_get_state(o->sink)))
            enable_output(o);
    }

    pa_log_info("Resumed successfully...");
}

/* Called from main context */
static int sink_set_state(pa_sink *sink, pa_sink_state_t state) {
    struct userdata *u;

    pa_sink_assert_ref(sink);
    pa_assert_se(u = sink->userdata);

    /* Please note that in contrast to the ALSA modules we call
     * suspend/unsuspend from main context here! */

    switch (state) {
        case PA_SINK_SUSPENDED:
            pa_assert(PA_SINK_IS_OPENED(pa_sink_get_state(u->sink)));

            suspend(u);
            break;

        case PA_SINK_IDLE:
        case PA_SINK_RUNNING:

            if (pa_sink_get_state(u->sink) == PA_SINK_SUSPENDED)
                unsuspend(u);

            break;

        case PA_SINK_UNLINKED:
        case PA_SINK_INIT:
        case PA_SINK_INVALID_STATE:
            ;
    }

    return 0;
}

/* Called from IO context */
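/* The combined sink's max request is the largest max request of any active
 * output; if there is none, fall back to our default block size. */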
static void update_max_request(struct userdata *u) {
    size_t max_request = 0;
    struct output *o;

    for (o = u->thread_info.active_outputs; o; o = o->next) {
        size_t mr = (size_t) pa_atomic_load(&o->max_request);

        if (mr > max_request)
            max_request = mr;
    }

    if (max_request <= 0)
        max_request = pa_usec_to_bytes(u->block_usec, &u->sink->sample_spec);

    pa_sink_set_max_request_within_thread(u->sink, max_request);
}

/* Called from IO thread context */
static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
    struct userdata *u = PA_SINK(o)->userdata;

    switch (code) {

        case PA_SINK_MESSAGE_SET_STATE:
            pa_atomic_store(&u->thread_info.running, PA_PTR_TO_UINT(data) == PA_SINK_RUNNING);

            if (PA_PTR_TO_UINT(data) == PA_SINK_SUSPENDED)
                pa_smoother_pause(u->thread_info.smoother, pa_rtclock_usec());
            else
                pa_smoother_resume(u->thread_info.smoother, pa_rtclock_usec(), TRUE);

            break;

        case PA_SINK_MESSAGE_GET_LATENCY: {
            pa_usec_t x, y, c, *delay = data;

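            /* Latency estimate: bytes rendered so far (converted to usec)
             * minus the smoother's estimate of how much has actually been
             * played back by now. */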
            x = pa_rtclock_usec();
            y = pa_smoother_get(u->thread_info.smoother, x);

            c = pa_bytes_to_usec(u->thread_info.counter, &u->sink->sample_spec);

            if (y < c)
                *delay = c - y;
            else
                *delay = 0;

            return 0;
        }

        case SINK_MESSAGE_ADD_OUTPUT: {
            struct output *op = data;

            PA_LLIST_PREPEND(struct output, u->thread_info.active_outputs, op);

            pa_assert(!op->outq_rtpoll_item_read && !op->inq_rtpoll_item_write);

            op->outq_rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
                    u->rtpoll,
                    PA_RTPOLL_EARLY-1,  /* This item is very important */
                    op->outq);
            op->inq_rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
                    u->rtpoll,
                    PA_RTPOLL_EARLY,
                    op->inq);

            update_max_request(u);
            return 0;
        }

        case SINK_MESSAGE_REMOVE_OUTPUT: {
            struct output *op = data;

            PA_LLIST_REMOVE(struct output, u->thread_info.active_outputs, op);

            pa_assert(op->outq_rtpoll_item_read && op->inq_rtpoll_item_write);

            pa_rtpoll_item_free(op->outq_rtpoll_item_read);
            op->outq_rtpoll_item_read = NULL;

            pa_rtpoll_item_free(op->inq_rtpoll_item_write);
            op->inq_rtpoll_item_write = NULL;

            update_max_request(u);
            return 0;
        }

        case SINK_MESSAGE_NEED:
            render_memblock(u, (struct output*) data, (size_t) offset);
            return 0;

        case SINK_MESSAGE_UPDATE_LATENCY: {
            pa_usec_t x, y, latency = (pa_usec_t) offset;

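            /* Feed the smoother: our current playback position is roughly
             * what we have rendered so far minus the measured average
             * latency of the outputs. */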
            x = pa_rtclock_usec();
            y = pa_bytes_to_usec(u->thread_info.counter, &u->sink->sample_spec);

            if (y > latency)
                y -= latency;
            else
                y = 0;

            pa_smoother_put(u->thread_info.smoother, x, y);
            return 0;
        }

        case SINK_MESSAGE_UPDATE_MAX_REQUEST:

            update_max_request(u);
            break;
    }

    return pa_sink_process_msg(o, code, data, offset, chunk);
}

static void update_description(struct userdata *u) {
    pa_bool_t first = TRUE;
    char *t;
    struct output *o;
    uint32_t idx;

    pa_assert(u);

    if (pa_idxset_isempty(u->outputs)) {
        pa_sink_set_description(u->sink, "Simultaneous output");
        return;
    }

    t = pa_xstrdup("Simultaneous output to");

    for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
        char *e;

        if (first) {
            e = pa_sprintf_malloc("%s %s", t, pa_strnull(pa_proplist_gets(o->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)));
            first = FALSE;
        } else
            e = pa_sprintf_malloc("%s, %s", t, pa_strnull(pa_proplist_gets(o->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)));

        pa_xfree(t);
        t = e;
    }

    pa_sink_set_description(u->sink, t);
    pa_xfree(t);
}

static int output_create_sink_input(struct output *o) {
    pa_sink_input_new_data data;

    pa_assert(o);

    if (o->sink_input)
        return 0;

    pa_sink_input_new_data_init(&data);
    data.sink = o->sink;
    data.driver = __FILE__;
    pa_proplist_setf(data.proplist, PA_PROP_MEDIA_NAME, "Simultaneous output on %s", pa_strnull(pa_proplist_gets(o->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)));
    pa_proplist_sets(data.proplist, PA_PROP_MEDIA_ROLE, "filter");
    pa_sink_input_new_data_set_sample_spec(&data, &o->userdata->sink->sample_spec);
    pa_sink_input_new_data_set_channel_map(&data, &o->userdata->sink->channel_map);
    data.module = o->userdata->module;
    data.resample_method = o->userdata->resample_method;

    pa_sink_input_new(&o->sink_input, o->userdata->core, &data, PA_SINK_INPUT_VARIABLE_RATE|PA_SINK_INPUT_DONT_MOVE);

    pa_sink_input_new_data_done(&data);

    if (!o->sink_input)
        return -1;

    o->sink_input->parent.process_msg = sink_input_process_msg;
    o->sink_input->pop = sink_input_pop_cb;
    o->sink_input->process_rewind = sink_input_process_rewind_cb;
    o->sink_input->state_change = sink_input_state_change_cb;
    o->sink_input->update_max_rewind = sink_input_update_max_rewind_cb;
    o->sink_input->update_max_request = sink_input_update_max_request_cb;
    o->sink_input->attach = sink_input_attach_cb;
    o->sink_input->detach = sink_input_detach_cb;
    o->sink_input->kill = sink_input_kill_cb;
    o->sink_input->userdata = o;

    pa_sink_input_set_requested_latency(o->sink_input, BLOCK_USEC);

    return 0;
}

static struct output *output_new(struct userdata *u, pa_sink *sink) {
    struct output *o;
    pa_sink_state_t state;

    pa_assert(u);
    pa_assert(sink);
    pa_assert(u->sink);

    o = pa_xnew(struct output, 1);
    o->userdata = u;
    o->inq = pa_asyncmsgq_new(0);
    o->outq = pa_asyncmsgq_new(0);
    o->inq_rtpoll_item_write = o->inq_rtpoll_item_read = NULL;
    o->outq_rtpoll_item_write = o->outq_rtpoll_item_read = NULL;
    o->sink = sink;
    o->sink_input = NULL;
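    /* Arguments to pa_memblockq_new(), in order: start read index, maxlength,
     * tlength, base (one frame), prebuf, minreq, maxrewind, silence chunk. */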
    o->memblockq = pa_memblockq_new(
            0,
            MEMBLOCKQ_MAXLENGTH,
            MEMBLOCKQ_MAXLENGTH,
            pa_frame_size(&u->sink->sample_spec),
            1,
            0,
            0,
            NULL);
    pa_atomic_store(&o->max_request, 0);
    PA_LLIST_INIT(struct output, o);

    pa_assert_se(pa_idxset_put(u->outputs, o, NULL) == 0);

    state = pa_sink_get_state(u->sink);

    if (state != PA_SINK_INIT)
        pa_asyncmsgq_send(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_ADD_OUTPUT, o, 0, NULL);
    else {
        /* If the sink is not yet started, we need to do the activation ourselves */
        PA_LLIST_PREPEND(struct output, u->thread_info.active_outputs, o);

        o->outq_rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
                u->rtpoll,
                PA_RTPOLL_EARLY-1,  /* This item is very important */
                o->outq);
        o->inq_rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
                u->rtpoll,
                PA_RTPOLL_EARLY,
                o->inq);
    }

    if (PA_SINK_IS_OPENED(state) || state == PA_SINK_INIT) {
        pa_sink_suspend(sink, FALSE);

        if (PA_SINK_IS_OPENED(pa_sink_get_state(sink)))
            if (output_create_sink_input(o) < 0)
                goto fail;
    }

    update_description(u);

    return o;

fail:

    if (o) {
        pa_idxset_remove_by_data(u->outputs, o, NULL);

        if (o->sink_input) {
            pa_sink_input_unlink(o->sink_input);
            pa_sink_input_unref(o->sink_input);
        }

        if (o->memblockq)
            pa_memblockq_free(o->memblockq);

        if (o->inq)
            pa_asyncmsgq_unref(o->inq);

        if (o->outq)
            pa_asyncmsgq_unref(o->outq);

        pa_xfree(o);
    }

    return NULL;
}

static pa_bool_t is_suitable_sink(struct userdata *u, pa_sink *s) {
    const char *t;

    pa_sink_assert_ref(s);

    if (!(s->flags & PA_SINK_HARDWARE))
        return FALSE;

    if (s == u->sink)
        return FALSE;

    if ((t = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_CLASS)))
        if (strcmp(t, "sound"))
            return FALSE;

    return TRUE;
}

static pa_hook_result_t sink_put_hook_cb(pa_core *c, pa_sink *s, struct userdata* u) {
    struct output *o;

    pa_core_assert_ref(c);
    pa_sink_assert_ref(s);
    pa_assert(u);
    pa_assert(u->automatic);

    if (!is_suitable_sink(u, s))
        return PA_HOOK_OK;

    pa_log_info("Configuring new sink: %s", s->name);

    if (!(o = output_new(u, s))) {
        pa_log("Failed to create sink input on sink '%s'.", s->name);
        return PA_HOOK_OK;
    }

    if (o->sink_input)
        pa_sink_input_put(o->sink_input);

    return PA_HOOK_OK;
}

static struct output* find_output(struct userdata *u, pa_sink *s) {
    struct output *o;
    uint32_t idx;

    pa_assert(u);
    pa_assert(s);

    if (u->sink == s)
        return NULL;

    for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
        if (o->sink == s)
            return o;

    return NULL;
}

static pa_hook_result_t sink_unlink_hook_cb(pa_core *c, pa_sink *s, struct userdata* u) {
    struct output *o;

    pa_assert(c);
    pa_sink_assert_ref(s);
    pa_assert(u);

    if (!(o = find_output(u, s)))
        return PA_HOOK_OK;

    pa_log_info("Unconfiguring sink: %s", s->name);

    output_free(o);

    return PA_HOOK_OK;
}

static pa_hook_result_t sink_state_changed_hook_cb(pa_core *c, pa_sink *s, struct userdata* u) {
    struct output *o;
    pa_sink_state_t state;

    if (!(o = find_output(u, s)))
        return PA_HOOK_OK;

    state = pa_sink_get_state(s);

    if (PA_SINK_IS_OPENED(state) && PA_SINK_IS_OPENED(pa_sink_get_state(u->sink)) && !o->sink_input)
        enable_output(o);

    if (state == PA_SINK_SUSPENDED && o->sink_input)
        disable_output(o);

    return PA_HOOK_OK;
}

int pa__init(pa_module*m) {
    struct userdata *u;
    pa_modargs *ma = NULL;
    const char *slaves, *rm;
    int resample_method = PA_RESAMPLER_TRIVIAL;
    pa_sample_spec ss;
    pa_channel_map map;
    struct output *o;
    uint32_t idx;
    pa_sink_new_data data;

    pa_assert(m);

    if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
        pa_log("failed to parse module arguments");
        goto fail;
    }

    if ((rm = pa_modargs_get_value(ma, "resample_method", NULL))) {
        if ((resample_method = pa_parse_resample_method(rm)) < 0) {
            pa_log("invalid resample method '%s'", rm);
            goto fail;
        }
    }

    m->userdata = u = pa_xnew(struct userdata, 1);
    u->core = m->core;
    u->module = m;
    u->sink = NULL;
    u->time_event = NULL;
    u->adjust_time = DEFAULT_ADJUST_TIME;
    u->rtpoll = pa_rtpoll_new();
    pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
    u->thread = NULL;
    u->resample_method = resample_method;
    u->outputs = pa_idxset_new(NULL, NULL);
    memset(&u->adjust_timestamp, 0, sizeof(u->adjust_timestamp));
    u->sink_put_slot = u->sink_unlink_slot = u->sink_state_changed_slot = NULL;
    PA_LLIST_HEAD_INIT(struct output, u->thread_info.active_outputs);
    pa_atomic_store(&u->thread_info.running, FALSE);
    u->thread_info.in_null_mode = FALSE;
    u->thread_info.counter = 0;
    u->thread_info.smoother = pa_smoother_new(
            PA_USEC_PER_SEC,
            PA_USEC_PER_SEC*2,
            TRUE,
            TRUE,
            10,
            0,
            FALSE);

    if (pa_modargs_get_value_u32(ma, "adjust_time", &u->adjust_time) < 0) {
        pa_log("Failed to parse adjust_time value");
        goto fail;
    }

    slaves = pa_modargs_get_value(ma, "slaves", NULL);
    u->automatic = !slaves;

    ss = m->core->default_sample_spec;
    map = m->core->default_channel_map;
    if ((pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_DEFAULT) < 0)) {
        pa_log("Invalid sample specification.");
        goto fail;
    }

    pa_sink_new_data_init(&data);
    data.namereg_fail = FALSE;
    data.driver = __FILE__;
    data.module = m;
    pa_sink_new_data_set_name(&data, pa_modargs_get_value(ma, "sink_name", DEFAULT_SINK_NAME));
    pa_sink_new_data_set_sample_spec(&data, &ss);
    pa_sink_new_data_set_channel_map(&data, &map);
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Simultaneous Output");
    pa_proplist_sets(data.proplist, PA_PROP_DEVICE_CLASS, "filter");

    if (slaves)
        pa_proplist_sets(data.proplist, "combine.slaves", slaves);

    if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
        pa_log("Invalid properties");
        pa_sink_new_data_done(&data);
        goto fail;
    }

    u->sink = pa_sink_new(m->core, &data, PA_SINK_LATENCY);
    pa_sink_new_data_done(&data);

    if (!u->sink) {
        pa_log("Failed to create sink");
        goto fail;
    }

    u->sink->parent.process_msg = sink_process_msg;
    u->sink->set_state = sink_set_state;
    u->sink->userdata = u;

    pa_sink_set_rtpoll(u->sink, u->rtpoll);
    pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);

    u->block_usec = BLOCK_USEC;
    pa_sink_set_max_request(u->sink, pa_usec_to_bytes(u->block_usec, &u->sink->sample_spec));

    if (!u->automatic) {
        const char *split_state;
        char *n = NULL;
        pa_assert(slaves);

        /* The slaves have been specified manually */

        split_state = NULL;
        while ((n = pa_split(slaves, ",", &split_state))) {
            pa_sink *slave_sink;

            if (!(slave_sink = pa_namereg_get(m->core, n, PA_NAMEREG_SINK)) || slave_sink == u->sink) {
                pa_log("Invalid slave sink '%s'", n);
                pa_xfree(n);
                goto fail;
            }

            pa_xfree(n);

            if (!output_new(u, slave_sink)) {
                pa_log("Failed to create slave sink input on sink '%s'.", slave_sink->name);
                goto fail;
            }
        }

        if (pa_idxset_size(u->outputs) <= 1)
            pa_log_warn("No slave sinks specified.");

        u->sink_put_slot = NULL;

    } else {
        pa_sink *s;

        /* We're in automatic mode, so we add every sink that matches our needs */

        for (s = pa_idxset_first(m->core->sinks, &idx); s; s = pa_idxset_next(m->core->sinks, &idx)) {

            if (!is_suitable_sink(u, s))
                continue;

            if (!output_new(u, s)) {
                pa_log("Failed to create sink input on sink '%s'.", s->name);
                goto fail;
            }
        }

        u->sink_put_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_PUT], PA_HOOK_LATE, (pa_hook_cb_t) sink_put_hook_cb, u);
    }

    u->sink_unlink_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_UNLINK], PA_HOOK_EARLY, (pa_hook_cb_t) sink_unlink_hook_cb, u);
    u->sink_state_changed_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], PA_HOOK_NORMAL, (pa_hook_cb_t) sink_state_changed_hook_cb, u);

    if (!(u->thread = pa_thread_new(thread_func, u))) {
        pa_log("Failed to create thread.");
        goto fail;
    }

    /* Activate the sink and the sink inputs */
    pa_sink_put(u->sink);

    for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
        if (o->sink_input)
            pa_sink_input_put(o->sink_input);

    if (u->adjust_time > 0) {
        struct timeval tv;
        pa_gettimeofday(&tv);
        tv.tv_sec += (time_t) u->adjust_time;
        u->time_event = m->core->mainloop->time_new(m->core->mainloop, &tv, time_callback, u);
    }

    pa_modargs_free(ma);

    return 0;

fail:

    if (ma)
        pa_modargs_free(ma);

    pa__done(m);

    return -1;
}

static void output_free(struct output *o) {
    pa_assert(o);

    disable_output(o);

    pa_assert_se(pa_idxset_remove_by_data(o->userdata->outputs, o, NULL));

    update_description(o->userdata);

    if (o->inq_rtpoll_item_read)
        pa_rtpoll_item_free(o->inq_rtpoll_item_read);
    if (o->inq_rtpoll_item_write)
        pa_rtpoll_item_free(o->inq_rtpoll_item_write);

    if (o->outq_rtpoll_item_read)
        pa_rtpoll_item_free(o->outq_rtpoll_item_read);
    if (o->outq_rtpoll_item_write)
        pa_rtpoll_item_free(o->outq_rtpoll_item_write);

    if (o->inq)
        pa_asyncmsgq_unref(o->inq);

    if (o->outq)
        pa_asyncmsgq_unref(o->outq);

    if (o->memblockq)
        pa_memblockq_free(o->memblockq);

    pa_xfree(o);
}

void pa__done(pa_module*m) {
    struct userdata *u;
    struct output *o;

    pa_assert(m);

    if (!(u = m->userdata))
        return;

    if (u->sink_put_slot)
        pa_hook_slot_free(u->sink_put_slot);

    if (u->sink_unlink_slot)
        pa_hook_slot_free(u->sink_unlink_slot);

    if (u->sink_state_changed_slot)
        pa_hook_slot_free(u->sink_state_changed_slot);

    if (u->outputs) {
        while ((o = pa_idxset_first(u->outputs, NULL)))
            output_free(o);

        pa_idxset_free(u->outputs, NULL, NULL);
    }

    if (u->sink)
        pa_sink_unlink(u->sink);

    if (u->thread) {
        pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
        pa_thread_free(u->thread);
    }

    pa_thread_mq_done(&u->thread_mq);

    if (u->sink)
        pa_sink_unref(u->sink);

    if (u->rtpoll)
        pa_rtpoll_free(u->rtpoll);

    if (u->time_event)
        u->core->mainloop->time_free(u->time_event);

    if (u->thread_info.smoother)
        pa_smoother_free(u->thread_info.smoother);

    pa_xfree(u);
}