src/modules/module-combine.c
1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5
6 PulseAudio is free software; you can redistribute it and/or modify
7 it under the terms of the GNU Lesser General Public License as published
8 by the Free Software Foundation; either version 2.1 of the License,
9 or (at your option) any later version.
10
11 PulseAudio is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with PulseAudio; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
19 USA.
20 ***/
21
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <stdio.h>
27 #include <errno.h>
28
29 #include <pulse/rtclock.h>
30 #include <pulse/timeval.h>
31 #include <pulse/xmalloc.h>
32
33 #include <pulsecore/macro.h>
34 #include <pulsecore/module.h>
35 #include <pulsecore/llist.h>
36 #include <pulsecore/sink.h>
37 #include <pulsecore/sink-input.h>
38 #include <pulsecore/memblockq.h>
39 #include <pulsecore/log.h>
40 #include <pulsecore/core-rtclock.h>
41 #include <pulsecore/core-util.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/namereg.h>
44 #include <pulsecore/mutex.h>
45 #include <pulsecore/thread.h>
46 #include <pulsecore/thread-mq.h>
47 #include <pulsecore/rtpoll.h>
48 #include <pulsecore/core-error.h>
49 #include <pulsecore/time-smoother.h>
50
51 #include "module-combine-symdef.h"
52
53 PA_MODULE_AUTHOR("Lennart Poettering");
54 PA_MODULE_DESCRIPTION("Combine multiple sinks to one");
55 PA_MODULE_VERSION(PACKAGE_VERSION);
56 PA_MODULE_LOAD_ONCE(FALSE);
57 PA_MODULE_USAGE(
58 "sink_name=<name for the sink> "
59 "sink_properties=<properties for the sink> "
60 "slaves=<slave sinks> "
61 "adjust_time=<seconds> "
62 "resample_method=<method> "
63 "format=<sample format> "
64 "rate=<sample rate> "
65 "channels=<number of channels> "
66 "channel_map=<channel map>");
67
68 #define DEFAULT_SINK_NAME "combined"
69
70 #define MEMBLOCKQ_MAXLENGTH (1024*1024*16)
71
72 #define DEFAULT_ADJUST_TIME 10
73
74 #define BLOCK_USEC (PA_USEC_PER_MSEC * 200)
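/* BLOCK_USEC (200 ms) serves several purposes below: it is the latency
 * requested from each slave sink input (output_create_sink_input()),
 * the size of the chunks rendered and discarded while no outputs are
 * connected (u->block_usec in process_render_null()), and the fallback
 * max_request in update_max_request(). */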
75
76 static const char* const valid_modargs[] = {
77 "sink_name",
78 "sink_properties",
79 "slaves",
80 "adjust_time",
81 "resample_method",
82 "format",
83 "rate",
84 "channels",
85 "channel_map",
86 NULL
87 };
88
89 struct output {
90 struct userdata *userdata;
91
92 pa_sink *sink;
93 pa_sink_input *sink_input;
94
95 pa_asyncmsgq *inq, /* Message queue from the sink thread to this sink input */
96 *outq; /* Message queue from this sink input to the sink thread */
97 pa_rtpoll_item *inq_rtpoll_item_read, *inq_rtpoll_item_write;
98 pa_rtpoll_item *outq_rtpoll_item_read, *outq_rtpoll_item_write;
99
100 pa_memblockq *memblockq;
101
102 pa_usec_t total_latency;
103
104 pa_atomic_t max_request;
105
106 PA_LLIST_FIELDS(struct output);
107 };
108
109 struct userdata {
110 pa_core *core;
111 pa_module *module;
112 pa_sink *sink;
113
114 pa_thread *thread;
115 pa_thread_mq thread_mq;
116 pa_rtpoll *rtpoll;
117
118 pa_time_event *time_event;
119 uint32_t adjust_time;
120
121 pa_bool_t automatic;
122
123 pa_hook_slot *sink_put_slot, *sink_unlink_slot, *sink_state_changed_slot;
124
125 pa_resample_method_t resample_method;
126
127 struct timeval adjust_timestamp;
128
129 pa_usec_t block_usec;
130
131 pa_idxset* outputs; /* managed in main context */
132
133 struct {
134 PA_LLIST_HEAD(struct output, active_outputs); /* managed in IO thread context */
135 pa_atomic_t running; /* we cache that value here, so that every thread can query it cheaply */
136 pa_usec_t timestamp;
137 pa_bool_t in_null_mode;
138 pa_smoother *smoother;
139 uint64_t counter;
140 } thread_info;
141 };
142
143 enum {
144 SINK_MESSAGE_ADD_OUTPUT = PA_SINK_MESSAGE_MAX,
145 SINK_MESSAGE_REMOVE_OUTPUT,
146 SINK_MESSAGE_NEED,
147 SINK_MESSAGE_UPDATE_LATENCY,
148 SINK_MESSAGE_UPDATE_MAX_REQUEST
149 };
150
151 enum {
152 SINK_INPUT_MESSAGE_POST = PA_SINK_INPUT_MESSAGE_MAX,
153 };
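/* A rough sketch of the data flow implied by these messages: each
 * output owns two asyncmsgqs (see struct output above). When a slave's
 * sink input needs audio, it sends SINK_MESSAGE_NEED over its outq to
 * the combined sink's thread, which renders a chunk, pushes it into
 * the requesting output's memblockq directly and posts it to every
 * other output's inq as SINK_INPUT_MESSAGE_POST (see render_memblock()
 * and request_memblock() below). */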
154
155 static void output_free(struct output *o);
156 static int output_create_sink_input(struct output *o);
157
158 static void adjust_rates(struct userdata *u) {
159 struct output *o;
160 pa_usec_t max_sink_latency = 0, min_total_latency = (pa_usec_t) -1, target_latency, avg_total_latency = 0;
161 uint32_t base_rate;
162 uint32_t idx;
163 unsigned n = 0;
164
165 pa_assert(u);
166 pa_sink_assert_ref(u->sink);
167
168 if (pa_idxset_size(u->outputs) <= 0)
169 return;
170
171 if (!PA_SINK_IS_OPENED(pa_sink_get_state(u->sink)))
172 return;
173
174 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
175 pa_usec_t sink_latency;
176
177 if (!o->sink_input || !PA_SINK_IS_OPENED(pa_sink_get_state(o->sink)))
178 continue;
179
180 o->total_latency = pa_sink_input_get_latency(o->sink_input, &sink_latency);
181 o->total_latency += sink_latency;
182
183 if (sink_latency > max_sink_latency)
184 max_sink_latency = sink_latency;
185
186 if (min_total_latency == (pa_usec_t) -1 || o->total_latency < min_total_latency)
187 min_total_latency = o->total_latency;
188
189 avg_total_latency += o->total_latency;
190 n++;
191 }
192
193 if (min_total_latency == (pa_usec_t) -1)
194 return;
195
196 avg_total_latency /= n;
197
198 target_latency = max_sink_latency > min_total_latency ? max_sink_latency : min_total_latency;
199
200 pa_log_info("[%s] avg total latency is %0.2f msec.", u->sink->name, (double) avg_total_latency / PA_USEC_PER_MSEC);
201 pa_log_info("[%s] target latency is %0.2f msec.", u->sink->name, (double) target_latency / PA_USEC_PER_MSEC);
202
203 base_rate = u->sink->sample_spec.rate;
204
205 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
206 uint32_t r = base_rate;
207
208 if (!o->sink_input || !PA_SINK_IS_OPENED(pa_sink_get_state(o->sink)))
209 continue;
210
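/* The rate is nudged in proportion to the latency deviation so that
 * the gap is expected to close over roughly adjust_time seconds:
 * delta_hz = latency_diff_usec * r / (adjust_time * PA_USEC_PER_SEC).
 * Illustrative numbers (not from this file): with r = 44100 Hz,
 * adjust_time = 10 and a 5 ms deviation, the correction is about
 * 5000 * 44100 / 10000000 ~= 22 Hz. */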
211 if (o->total_latency < target_latency)
212 r -= (uint32_t) ((((double) (target_latency - o->total_latency))/(double)u->adjust_time)*(double)r/PA_USEC_PER_SEC);
213 else if (o->total_latency > target_latency)
214 r += (uint32_t) ((((double) (o->total_latency - target_latency))/(double)u->adjust_time)*(double)r/PA_USEC_PER_SEC);
215
216 if (r < (uint32_t) (base_rate*0.9) || r > (uint32_t) (base_rate*1.1)) {
217 pa_log_warn("[%s] sample rates too different, not adjusting (%u vs. %u).", pa_proplist_gets(o->sink_input->proplist, PA_PROP_MEDIA_NAME), base_rate, r);
218 pa_sink_input_set_rate(o->sink_input, base_rate);
219 } else {
220 pa_log_info("[%s] new rate is %u Hz; ratio is %0.3f; latency is %0.0f usec.", pa_proplist_gets(o->sink_input->proplist, PA_PROP_MEDIA_NAME), r, (double) r / base_rate, (float) o->total_latency);
221 pa_sink_input_set_rate(o->sink_input, r);
222 }
223 }
224
225 pa_asyncmsgq_send(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_UPDATE_LATENCY, NULL, (int64_t) avg_total_latency, NULL);
226 }
227
228 static void time_callback(pa_mainloop_api *a, pa_time_event *e, const struct timeval *t, void *userdata) {
229 struct userdata *u = userdata;
230
231 pa_assert(u);
232 pa_assert(a);
233 pa_assert(u->time_event == e);
234
235 adjust_rates(u);
236
237 pa_core_rttime_restart(u->core, e, pa_rtclock_now() + u->adjust_time * PA_USEC_PER_SEC);
238 }
239
240 static void process_render_null(struct userdata *u, pa_usec_t now) {
241 size_t ate = 0;
242 pa_assert(u);
243
244 if (u->thread_info.in_null_mode)
245 u->thread_info.timestamp = now;
246
247 while (u->thread_info.timestamp < now + u->block_usec) {
248 pa_memchunk chunk;
249
250 pa_sink_render(u->sink, u->sink->thread_info.max_request, &chunk);
251 pa_memblock_unref(chunk.memblock);
252
253 u->thread_info.counter += chunk.length;
254
255 /* pa_log_debug("Ate %lu bytes.", (unsigned long) chunk.length); */
256 u->thread_info.timestamp += pa_bytes_to_usec(chunk.length, &u->sink->sample_spec);
257
258 ate += chunk.length;
259
260 if (ate >= u->sink->thread_info.max_request)
261 break;
262 }
263
264 /* pa_log_debug("Ate in sum %lu bytes (of %lu)", (unsigned long) ate, (unsigned long) nbytes); */
265
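/* Feed the smoother: counter (converted to usec) is how much audio we
 * have rendered in total, and (timestamp - now) is how far ahead of
 * real time we have rendered; their difference approximates the
 * stream position corresponding to "now". */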
266 pa_smoother_put(u->thread_info.smoother, now,
267 pa_bytes_to_usec(u->thread_info.counter, &u->sink->sample_spec) - (u->thread_info.timestamp - now));
268 }
269
270 static void thread_func(void *userdata) {
271 struct userdata *u = userdata;
272
273 pa_assert(u);
274
275 pa_log_debug("Thread starting up");
276
277 if (u->core->realtime_scheduling)
278 pa_make_realtime(u->core->realtime_priority+1);
279
280 pa_thread_mq_install(&u->thread_mq);
281 pa_rtpoll_install(u->rtpoll);
282
283 u->thread_info.timestamp = pa_rtclock_now();
284 u->thread_info.in_null_mode = FALSE;
285
286 for (;;) {
287 int ret;
288
289 if (PA_SINK_IS_OPENED(u->sink->thread_info.state))
290 if (u->sink->thread_info.rewind_requested)
291 pa_sink_process_rewind(u->sink, 0);
292
293 /* If no outputs are connected, render some data and drop it immediately. */
294 if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && !u->thread_info.active_outputs) {
295 pa_usec_t now;
296
297 now = pa_rtclock_now();
298
299 if (!u->thread_info.in_null_mode || u->thread_info.timestamp <= now)
300 process_render_null(u, now);
301
302 pa_rtpoll_set_timer_absolute(u->rtpoll, u->thread_info.timestamp);
303 u->thread_info.in_null_mode = TRUE;
304 } else {
305 pa_rtpoll_set_timer_disabled(u->rtpoll);
306 u->thread_info.in_null_mode = FALSE;
307 }
308
309 /* Hmm, nothing to do. Let's sleep */
310 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0) {
311 pa_log_info("pa_rtpoll_run() = %i", ret);
312 goto fail;
313 }
314
315 if (ret == 0)
316 goto finish;
317 }
318
319 fail:
320 /* If this was not a regular exit from the loop, we have to continue
321 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
322 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
323 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
324
325 finish:
326 pa_log_debug("Thread shutting down");
327 }
328
329 /* Called from I/O thread context */
330 static void render_memblock(struct userdata *u, struct output *o, size_t length) {
331 pa_assert(u);
332 pa_assert(o);
333
334 /* We are run by the sink thread, on behalf of an output (o). The
335 * output is waiting for us, hence it is safe to access its
336 * memblockq and asyncmsgq directly. */
337
338 /* If we are not running, we cannot produce any data */
339 if (!pa_atomic_load(&u->thread_info.running))
340 return;
341
342 /* Maybe there's some data in the requesting output's queue
343 * now? */
344 while (pa_asyncmsgq_process_one(o->inq) > 0)
345 ;
346
347 /* Ok, now let's prepare some data if we really have to */
348 while (!pa_memblockq_is_readable(o->memblockq)) {
349 struct output *j;
350 pa_memchunk chunk;
351
352 /* Render data! */
353 pa_sink_render(u->sink, length, &chunk);
354
355 u->thread_info.counter += chunk.length;
356
357 /* OK, let's send this data to the other threads */
358 for (j = u->thread_info.active_outputs; j; j = j->next)
359
360 /* Send to other outputs, which are not the requesting
361 * one */
362
363 if (j != o)
364 pa_asyncmsgq_post(j->inq, PA_MSGOBJECT(j->sink_input), SINK_INPUT_MESSAGE_POST, NULL, 0, &chunk, NULL);
365
366 /* And place it directly into the requesting output's queue */
367 if (o)
368 pa_memblockq_push_align(o->memblockq, &chunk);
369
370 pa_memblock_unref(chunk.memblock);
371 }
372 }
373
374 /* Called from I/O thread context */
375 static void request_memblock(struct output *o, size_t length) {
376 pa_assert(o);
377 pa_sink_input_assert_ref(o->sink_input);
378 pa_sink_assert_ref(o->userdata->sink);
379
380 /* If another thread has already prepared some data, we will have
381 * received it over the asyncmsgq; so let's process it
382 * first. */
383 while (pa_asyncmsgq_process_one(o->inq) > 0)
384 ;
385
386 /* Check whether we're now readable */
387 if (pa_memblockq_is_readable(o->memblockq))
388 return;
389
390 /* OK, we need to prepare new data, but only if the sink is actually running */
391 if (pa_atomic_load(&o->userdata->thread_info.running))
392 pa_asyncmsgq_send(o->outq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_NEED, o, (int64_t) length, NULL);
393 }
394
395 /* Called from I/O thread context */
396 static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
397 struct output *o;
398
399 pa_sink_input_assert_ref(i);
400 pa_assert_se(o = i->userdata);
401
402 /* If necessary, get some new data */
403 request_memblock(o, nbytes);
404
405 if (pa_memblockq_peek(o->memblockq, chunk) < 0)
406 return -1;
407
408 pa_memblockq_drop(o->memblockq, chunk->length);
409 return 0;
410 }
411
412 /* Called from I/O thread context */
413 static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
414 struct output *o;
415
416 pa_sink_input_assert_ref(i);
417 pa_assert_se(o = i->userdata);
418
419 pa_memblockq_rewind(o->memblockq, nbytes);
420 }
421
422 /* Called from I/O thread context */
423 static void sink_input_update_max_rewind_cb(pa_sink_input *i, size_t nbytes) {
424 struct output *o;
425
426 pa_sink_input_assert_ref(i);
427 pa_assert_se(o = i->userdata);
428
429 pa_memblockq_set_maxrewind(o->memblockq, nbytes);
430 }
431
432 /* Called from I/O thread context */
433 static void sink_input_update_max_request_cb(pa_sink_input *i, size_t nbytes) {
434 struct output *o;
435
436 pa_sink_input_assert_ref(i);
437 pa_assert_se(o = i->userdata);
438
439 if (pa_atomic_load(&o->max_request) == (int) nbytes)
440 return;
441
442 pa_atomic_store(&o->max_request, (int) nbytes);
443
444 pa_asyncmsgq_post(o->outq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_UPDATE_MAX_REQUEST, NULL, 0, NULL, NULL);
445 }
446
447 /* Called from I/O thread context */
448 static void sink_input_attach_cb(pa_sink_input *i) {
449 struct output *o;
450
451 pa_sink_input_assert_ref(i);
452 pa_assert_se(o = i->userdata);
453
454 /* Set up the queues between the sink thread and us */
455 pa_assert(!o->inq_rtpoll_item_read && !o->outq_rtpoll_item_write);
456
457 o->inq_rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
458 i->sink->rtpoll,
459 PA_RTPOLL_LATE, /* This one is not that important, since we check for data in _peek() anyway. */
460 o->inq);
461
462 o->outq_rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
463 i->sink->rtpoll,
464 PA_RTPOLL_EARLY,
465 o->outq);
466 }
467
468 /* Called from I/O thread context */
469 static void sink_input_detach_cb(pa_sink_input *i) {
470 struct output *o;
471
472 pa_sink_input_assert_ref(i);
473 pa_assert_se(o = i->userdata);
474
475 /* Shut down the queues between the sink thread and us */
476 pa_assert(o->inq_rtpoll_item_read && o->outq_rtpoll_item_write);
477
478 pa_rtpoll_item_free(o->inq_rtpoll_item_read);
479 o->inq_rtpoll_item_read = NULL;
480
481 pa_rtpoll_item_free(o->outq_rtpoll_item_write);
482 o->outq_rtpoll_item_write = NULL;
483 }
484
485 /* Called from main context */
486 static void sink_input_kill_cb(pa_sink_input *i) {
487 struct output *o;
488
489 pa_sink_input_assert_ref(i);
490 pa_assert_se(o = i->userdata);
491
492 pa_module_unload_request(o->userdata->module, TRUE);
493 output_free(o);
494 }
495
496 /* Called from IO thread context */
497 static void sink_input_state_change_cb(pa_sink_input *i, pa_sink_input_state_t state) {
498 struct userdata *u;
499
500 pa_sink_input_assert_ref(i);
501 pa_assert_se(u = i->userdata);
502
503 /* If we are added for the first time, ask for a rewind so that
504 * we are heard right away. */
505 if (PA_SINK_INPUT_IS_LINKED(state) &&
506 i->thread_info.state == PA_SINK_INPUT_INIT)
507 pa_sink_input_request_rewind(i, 0, FALSE, TRUE, TRUE);
508 }
509
510 /* Called from thread context */
511 static int sink_input_process_msg(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
512 struct output *o = PA_SINK_INPUT(obj)->userdata;
513
514 switch (code) {
515
516 case PA_SINK_INPUT_MESSAGE_GET_LATENCY: {
517 pa_usec_t *r = data;
518
519 *r = pa_bytes_to_usec(pa_memblockq_get_length(o->memblockq), &o->sink_input->sample_spec);
520
521 /* Fall through; the default handler will add the extra
522 * latency introduced by the resampler */
523 break;
524 }
525
526 case SINK_INPUT_MESSAGE_POST:
527
528 if (PA_SINK_IS_OPENED(o->sink_input->sink->thread_info.state))
529 pa_memblockq_push_align(o->memblockq, chunk);
530 else
531 pa_memblockq_flush_write(o->memblockq);
532
533 return 0;
534 }
535
536 return pa_sink_input_process_msg(obj, code, data, offset, chunk);
537 }
538
539 /* Called from main context */
540 static void disable_output(struct output *o) {
541 pa_assert(o);
542
543 if (!o->sink_input)
544 return;
545
546 pa_sink_input_unlink(o->sink_input);
547 pa_asyncmsgq_send(o->userdata->sink->asyncmsgq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_REMOVE_OUTPUT, o, 0, NULL);
548 pa_sink_input_unref(o->sink_input);
549 o->sink_input = NULL;
550 }
551
552 /* Called from main context */
553 static void enable_output(struct output *o) {
554 pa_assert(o);
555
556 if (o->sink_input)
557 return;
558
559 if (output_create_sink_input(o) >= 0) {
560
561 pa_memblockq_flush_write(o->memblockq);
562
563 pa_sink_input_put(o->sink_input);
564
565 if (o->userdata->sink && PA_SINK_IS_LINKED(pa_sink_get_state(o->userdata->sink)))
566 pa_asyncmsgq_send(o->userdata->sink->asyncmsgq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_ADD_OUTPUT, o, 0, NULL);
567 }
568 }
569
570 /* Called from main context */
571 static void suspend(struct userdata *u) {
572 struct output *o;
573 uint32_t idx;
574
575 pa_assert(u);
576
577 /* Let's suspend by unlinking all streams */
578 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
579 disable_output(o);
580
581 pa_log_info("Device suspended...");
582 }
583
584 /* Called from main context */
585 static void unsuspend(struct userdata *u) {
586 struct output *o;
587 uint32_t idx;
588
589 pa_assert(u);
590
591 /* Let's resume */
592 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
593
594 pa_sink_suspend(o->sink, FALSE, PA_SUSPEND_IDLE);
595
596 if (PA_SINK_IS_OPENED(pa_sink_get_state(o->sink)))
597 enable_output(o);
598 }
599
600 pa_log_info("Resumed successfully...");
601 }
602
603 /* Called from main context */
604 static int sink_set_state(pa_sink *sink, pa_sink_state_t state) {
605 struct userdata *u;
606
607 pa_sink_assert_ref(sink);
608 pa_assert_se(u = sink->userdata);
609
610 /* Please note that in contrast to the ALSA modules we call
611 * suspend/unsuspend from main context here! */
612
613 switch (state) {
614 case PA_SINK_SUSPENDED:
615 pa_assert(PA_SINK_IS_OPENED(pa_sink_get_state(u->sink)));
616
617 suspend(u);
618 break;
619
620 case PA_SINK_IDLE:
621 case PA_SINK_RUNNING:
622
623 if (pa_sink_get_state(u->sink) == PA_SINK_SUSPENDED)
624 unsuspend(u);
625
626 break;
627
628 case PA_SINK_UNLINKED:
629 case PA_SINK_INIT:
630 case PA_SINK_INVALID_STATE:
631 ;
632 }
633
634 return 0;
635 }
636
637 /* Called from IO context */
638 static void update_max_request(struct userdata *u) {
639 size_t max_request = 0;
640 struct output *o;
641
642 for (o = u->thread_info.active_outputs; o; o = o->next) {
643 size_t mr = (size_t) pa_atomic_load(&o->max_request);
644
645 if (mr > max_request)
646 max_request = mr;
647 }
648
649 if (max_request <= 0)
650 max_request = pa_usec_to_bytes(u->block_usec, &u->sink->sample_spec);
651
652 pa_sink_set_max_request_within_thread(u->sink, max_request);
653 }
654
655 /* Called from the I/O thread context */
656 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
657 struct userdata *u = PA_SINK(o)->userdata;
658
659 switch (code) {
660
661 case PA_SINK_MESSAGE_SET_STATE:
662 pa_atomic_store(&u->thread_info.running, PA_PTR_TO_UINT(data) == PA_SINK_RUNNING);
663
664 if (PA_PTR_TO_UINT(data) == PA_SINK_SUSPENDED)
665 pa_smoother_pause(u->thread_info.smoother, pa_rtclock_now());
666 else
667 pa_smoother_resume(u->thread_info.smoother, pa_rtclock_now(), TRUE);
668
669 break;
670
671 case PA_SINK_MESSAGE_GET_LATENCY: {
672 pa_usec_t x, y, c, *delay = data;
673
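/* y is the smoother's estimate of how much of our stream has been
 * played back by time x, c is how much we have rendered in total;
 * whatever has been rendered but not yet played is the latency we
 * report. */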
674 x = pa_rtclock_now();
675 y = pa_smoother_get(u->thread_info.smoother, x);
676
677 c = pa_bytes_to_usec(u->thread_info.counter, &u->sink->sample_spec);
678
679 if (y < c)
680 *delay = c - y;
681 else
682 *delay = 0;
683
684 return 0;
685 }
686
687 case SINK_MESSAGE_ADD_OUTPUT: {
688 struct output *op = data;
689
690 PA_LLIST_PREPEND(struct output, u->thread_info.active_outputs, op);
691
692 pa_assert(!op->outq_rtpoll_item_read && !op->inq_rtpoll_item_write);
693
694 op->outq_rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
695 u->rtpoll,
696 PA_RTPOLL_EARLY-1, /* This item is very important */
697 op->outq);
698 op->inq_rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
699 u->rtpoll,
700 PA_RTPOLL_EARLY,
701 op->inq);
702
703 update_max_request(u);
704 return 0;
705 }
706
707 case SINK_MESSAGE_REMOVE_OUTPUT: {
708 struct output *op = data;
709
710 PA_LLIST_REMOVE(struct output, u->thread_info.active_outputs, op);
711
712 pa_assert(op->outq_rtpoll_item_read && op->inq_rtpoll_item_write);
713
714 pa_rtpoll_item_free(op->outq_rtpoll_item_read);
715 op->outq_rtpoll_item_read = NULL;
716
717 pa_rtpoll_item_free(op->inq_rtpoll_item_write);
718 op->inq_rtpoll_item_write = NULL;
719
720 update_max_request(u);
721 return 0;
722 }
723
724 case SINK_MESSAGE_NEED:
725 render_memblock(u, (struct output*) data, (size_t) offset);
726 return 0;
727
728 case SINK_MESSAGE_UPDATE_LATENCY: {
729 pa_usec_t x, y, latency = (pa_usec_t) offset;
730
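/* The main thread measured the average total latency of the outputs
 * in adjust_rates(); subtracting it from the amount of audio rendered
 * so far gives an estimate of how much has actually been played, which
 * is what we teach the smoother here. */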
731 x = pa_rtclock_now();
732 y = pa_bytes_to_usec(u->thread_info.counter, &u->sink->sample_spec);
733
734 if (y > latency)
735 y -= latency;
736 else
737 y = 0;
738
739 pa_smoother_put(u->thread_info.smoother, x, y);
740 return 0;
741 }
742
743 case SINK_MESSAGE_UPDATE_MAX_REQUEST:
744
745 update_max_request(u);
746 break;
747 }
748
749 return pa_sink_process_msg(o, code, data, offset, chunk);
750 }
751
752 static void update_description(struct userdata *u) {
753 pa_bool_t first = TRUE;
754 char *t;
755 struct output *o;
756 uint32_t idx;
757
758 pa_assert(u);
759
760 if (pa_idxset_isempty(u->outputs)) {
761 pa_sink_set_description(u->sink, "Simultaneous output");
762 return;
763 }
764
765 t = pa_xstrdup("Simultaneous output to");
766
767 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
768 char *e;
769
770 if (first) {
771 e = pa_sprintf_malloc("%s %s", t, pa_strnull(pa_proplist_gets(o->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)));
772 first = FALSE;
773 } else
774 e = pa_sprintf_malloc("%s, %s", t, pa_strnull(pa_proplist_gets(o->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)));
775
776 pa_xfree(t);
777 t = e;
778 }
779
780 pa_sink_set_description(u->sink, t);
781 pa_xfree(t);
782 }
783
784 static int output_create_sink_input(struct output *o) {
785 pa_sink_input_new_data data;
786
787 pa_assert(o);
788
789 if (o->sink_input)
790 return 0;
791
792 pa_sink_input_new_data_init(&data);
793 data.sink = o->sink;
794 data.driver = __FILE__;
795 pa_proplist_setf(data.proplist, PA_PROP_MEDIA_NAME, "Simultaneous output on %s", pa_strnull(pa_proplist_gets(o->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)));
796 pa_proplist_sets(data.proplist, PA_PROP_MEDIA_ROLE, "filter");
797 pa_sink_input_new_data_set_sample_spec(&data, &o->userdata->sink->sample_spec);
798 pa_sink_input_new_data_set_channel_map(&data, &o->userdata->sink->channel_map);
799 data.module = o->userdata->module;
800 data.resample_method = o->userdata->resample_method;
801
802 pa_sink_input_new(&o->sink_input, o->userdata->core, &data, PA_SINK_INPUT_VARIABLE_RATE|PA_SINK_INPUT_DONT_MOVE);
803
804 pa_sink_input_new_data_done(&data);
805
806 if (!o->sink_input)
807 return -1;
808
809 o->sink_input->parent.process_msg = sink_input_process_msg;
810 o->sink_input->pop = sink_input_pop_cb;
811 o->sink_input->process_rewind = sink_input_process_rewind_cb;
812 o->sink_input->state_change = sink_input_state_change_cb;
813 o->sink_input->update_max_rewind = sink_input_update_max_rewind_cb;
814 o->sink_input->update_max_request = sink_input_update_max_request_cb;
815 o->sink_input->attach = sink_input_attach_cb;
816 o->sink_input->detach = sink_input_detach_cb;
817 o->sink_input->kill = sink_input_kill_cb;
818 o->sink_input->userdata = o;
819
820 pa_sink_input_set_requested_latency(o->sink_input, BLOCK_USEC);
821
822 return 0;
823 }
824
825 static struct output *output_new(struct userdata *u, pa_sink *sink) {
826 struct output *o;
827 pa_sink_state_t state;
828
829 pa_assert(u);
830 pa_assert(sink);
831 pa_assert(u->sink);
832
833 o = pa_xnew(struct output, 1);
834 o->userdata = u;
835 o->inq = pa_asyncmsgq_new(0);
836 o->outq = pa_asyncmsgq_new(0);
837 o->inq_rtpoll_item_write = o->inq_rtpoll_item_read = NULL;
838 o->outq_rtpoll_item_write = o->outq_rtpoll_item_read = NULL;
839 o->sink = sink;
840 o->sink_input = NULL;
841 o->memblockq = pa_memblockq_new(
842 0,
843 MEMBLOCKQ_MAXLENGTH,
844 MEMBLOCKQ_MAXLENGTH,
845 pa_frame_size(&u->sink->sample_spec),
846 1,
847 0,
848 0,
849 NULL);
850 pa_atomic_store(&o->max_request, 0);
851 PA_LLIST_INIT(struct output, o);
852
853 pa_assert_se(pa_idxset_put(u->outputs, o, NULL) == 0);
854
855 state = pa_sink_get_state(u->sink);
856
857 if (state != PA_SINK_INIT)
858 pa_asyncmsgq_send(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_ADD_OUTPUT, o, 0, NULL);
859 else {
860 /* If the sink is not yet started, we need to do the activation ourselves */
861 PA_LLIST_PREPEND(struct output, u->thread_info.active_outputs, o);
862
863 o->outq_rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
864 u->rtpoll,
865 PA_RTPOLL_EARLY-1, /* This item is very important */
866 o->outq);
867 o->inq_rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
868 u->rtpoll,
869 PA_RTPOLL_EARLY,
870 o->inq);
871 }
872
873 if (PA_SINK_IS_OPENED(state) || state == PA_SINK_INIT) {
874 pa_sink_suspend(sink, FALSE, PA_SUSPEND_IDLE);
875
876 if (PA_SINK_IS_OPENED(pa_sink_get_state(sink)))
877 if (output_create_sink_input(o) < 0)
878 goto fail;
879 }
880
881 update_description(u);
882
883 return o;
884
885 fail:
886
887 if (o) {
888 pa_idxset_remove_by_data(u->outputs, o, NULL);
889
890 if (o->sink_input) {
891 pa_sink_input_unlink(o->sink_input);
892 pa_sink_input_unref(o->sink_input);
893 }
894
895 if (o->memblockq)
896 pa_memblockq_free(o->memblockq);
897
898 if (o->inq)
899 pa_asyncmsgq_unref(o->inq);
900
901 if (o->outq)
902 pa_asyncmsgq_unref(o->outq);
903
904 pa_xfree(o);
905 }
906
907 return NULL;
908 }
909
910 static pa_bool_t is_suitable_sink(struct userdata *u, pa_sink *s) {
911 const char *t;
912
913 pa_sink_assert_ref(s);
914
915 if (!(s->flags & PA_SINK_HARDWARE))
916 return FALSE;
917
918 if (s == u->sink)
919 return FALSE;
920
921 if ((t = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_CLASS)))
922 if (strcmp(t, "sound"))
923 return FALSE;
924
925 return TRUE;
926 }
927
928 static pa_hook_result_t sink_put_hook_cb(pa_core *c, pa_sink *s, struct userdata* u) {
929 struct output *o;
930
931 pa_core_assert_ref(c);
932 pa_sink_assert_ref(s);
933 pa_assert(u);
934 pa_assert(u->automatic);
935
936 if (!is_suitable_sink(u, s))
937 return PA_HOOK_OK;
938
939 pa_log_info("Configuring new sink: %s", s->name);
940
941 if (!(o = output_new(u, s))) {
942 pa_log("Failed to create sink input on sink '%s'.", s->name);
943 return PA_HOOK_OK;
944 }
945
946 if (o->sink_input)
947 pa_sink_input_put(o->sink_input);
948
949 return PA_HOOK_OK;
950 }
951
952 static struct output* find_output(struct userdata *u, pa_sink *s) {
953 struct output *o;
954 uint32_t idx;
955
956 pa_assert(u);
957 pa_assert(s);
958
959 if (u->sink == s)
960 return NULL;
961
962 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
963 if (o->sink == s)
964 return o;
965
966 return NULL;
967 }
968
969 static pa_hook_result_t sink_unlink_hook_cb(pa_core *c, pa_sink *s, struct userdata* u) {
970 struct output *o;
971
972 pa_assert(c);
973 pa_sink_assert_ref(s);
974 pa_assert(u);
975
976 if (!(o = find_output(u, s)))
977 return PA_HOOK_OK;
978
979 pa_log_info("Unconfiguring sink: %s", s->name);
980
981 output_free(o);
982
983 return PA_HOOK_OK;
984 }
985
986 static pa_hook_result_t sink_state_changed_hook_cb(pa_core *c, pa_sink *s, struct userdata* u) {
987 struct output *o;
988 pa_sink_state_t state;
989
990 if (!(o = find_output(u, s)))
991 return PA_HOOK_OK;
992
993 state = pa_sink_get_state(s);
994
995 if (PA_SINK_IS_OPENED(state) && PA_SINK_IS_OPENED(pa_sink_get_state(u->sink)) && !o->sink_input)
996 enable_output(o);
997
998 if (state == PA_SINK_SUSPENDED && o->sink_input)
999 disable_output(o);
1000
1001 return PA_HOOK_OK;
1002 }
1003
1004 int pa__init(pa_module*m) {
1005 struct userdata *u;
1006 pa_modargs *ma = NULL;
1007 const char *slaves, *rm;
1008 int resample_method = PA_RESAMPLER_TRIVIAL;
1009 pa_sample_spec ss;
1010 pa_channel_map map;
1011 struct output *o;
1012 uint32_t idx;
1013 pa_sink_new_data data;
1014
1015 pa_assert(m);
1016
1017 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1018 pa_log("failed to parse module arguments");
1019 goto fail;
1020 }
1021
1022 if ((rm = pa_modargs_get_value(ma, "resample_method", NULL))) {
1023 if ((resample_method = pa_parse_resample_method(rm)) < 0) {
1024 pa_log("invalid resample method '%s'", rm);
1025 goto fail;
1026 }
1027 }
1028
1029 m->userdata = u = pa_xnew(struct userdata, 1);
1030 u->core = m->core;
1031 u->module = m;
1032 u->sink = NULL;
1033 u->time_event = NULL;
1034 u->adjust_time = DEFAULT_ADJUST_TIME;
1035 u->rtpoll = pa_rtpoll_new();
1036 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1037 u->thread = NULL;
1038 u->resample_method = resample_method;
1039 u->outputs = pa_idxset_new(NULL, NULL);
1040 memset(&u->adjust_timestamp, 0, sizeof(u->adjust_timestamp));
1041 u->sink_put_slot = u->sink_unlink_slot = u->sink_state_changed_slot = NULL;
1042 PA_LLIST_HEAD_INIT(struct output, u->thread_info.active_outputs);
1043 pa_atomic_store(&u->thread_info.running, FALSE);
1044 u->thread_info.in_null_mode = FALSE;
1045 u->thread_info.counter = 0;
1046 u->thread_info.smoother = pa_smoother_new(
1047 PA_USEC_PER_SEC,
1048 PA_USEC_PER_SEC*2,
1049 TRUE,
1050 TRUE,
1051 10,
1052 0,
1053 FALSE);
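/* This smoother turns the render counter plus the latency reports from
 * adjust_rates() (SINK_MESSAGE_UPDATE_LATENCY) into a steady estimate
 * of playback time; process_render_null() feeds it as well, and
 * PA_SINK_MESSAGE_GET_LATENCY consults it to report the sink latency. */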
1054
1055 if (pa_modargs_get_value_u32(ma, "adjust_time", &u->adjust_time) < 0) {
1056 pa_log("Failed to parse adjust_time value");
1057 goto fail;
1058 }
1059
1060 slaves = pa_modargs_get_value(ma, "slaves", NULL);
1061 u->automatic = !slaves;
1062
1063 ss = m->core->default_sample_spec;
1064 map = m->core->default_channel_map;
1065 if ((pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_DEFAULT) < 0)) {
1066 pa_log("Invalid sample specification.");
1067 goto fail;
1068 }
1069
1070 pa_sink_new_data_init(&data);
1071 data.namereg_fail = FALSE;
1072 data.driver = __FILE__;
1073 data.module = m;
1074 pa_sink_new_data_set_name(&data, pa_modargs_get_value(ma, "sink_name", DEFAULT_SINK_NAME));
1075 pa_sink_new_data_set_sample_spec(&data, &ss);
1076 pa_sink_new_data_set_channel_map(&data, &map);
1077 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Simultaneous Output");
1078 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1079
1080 if (slaves)
1081 pa_proplist_sets(data.proplist, "combine.slaves", slaves);
1082
1083 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1084 pa_log("Invalid properties");
1085 pa_sink_new_data_done(&data);
1086 goto fail;
1087 }
1088
1089 u->sink = pa_sink_new(m->core, &data, PA_SINK_LATENCY);
1090 pa_sink_new_data_done(&data);
1091
1092 if (!u->sink) {
1093 pa_log("Failed to create sink");
1094 goto fail;
1095 }
1096
1097 u->sink->parent.process_msg = sink_process_msg;
1098 u->sink->set_state = sink_set_state;
1099 u->sink->userdata = u;
1100
1101 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1102 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1103
1104 u->block_usec = BLOCK_USEC;
1105 pa_sink_set_max_request(u->sink, pa_usec_to_bytes(u->block_usec, &u->sink->sample_spec));
1106
1107 if (!u->automatic) {
1108 const char*split_state;
1109 char *n = NULL;
1110 pa_assert(slaves);
1111
1112 /* The slaves have been specified manually */
1113
1114 split_state = NULL;
1115 while ((n = pa_split(slaves, ",", &split_state))) {
1116 pa_sink *slave_sink;
1117
1118 if (!(slave_sink = pa_namereg_get(m->core, n, PA_NAMEREG_SINK)) || slave_sink == u->sink) {
1119 pa_log("Invalid slave sink '%s'", n);
1120 pa_xfree(n);
1121 goto fail;
1122 }
1123
1124 pa_xfree(n);
1125
1126 if (!output_new(u, slave_sink)) {
1127 pa_log("Failed to create slave sink input on sink '%s'.", slave_sink->name);
1128 goto fail;
1129 }
1130 }
1131
1132 if (pa_idxset_size(u->outputs) <= 1)
1133 pa_log_warn("No slave sinks specified.");
1134
1135 u->sink_put_slot = NULL;
1136
1137 } else {
1138 pa_sink *s;
1139
1140 /* We're in automatic mode, so we add every sink that matches our needs */
1141
1142 for (s = pa_idxset_first(m->core->sinks, &idx); s; s = pa_idxset_next(m->core->sinks, &idx)) {
1143
1144 if (!is_suitable_sink(u, s))
1145 continue;
1146
1147 if (!output_new(u, s)) {
1148 pa_log("Failed to create sink input on sink '%s'.", s->name);
1149 goto fail;
1150 }
1151 }
1152
1153 u->sink_put_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_PUT], PA_HOOK_LATE, (pa_hook_cb_t) sink_put_hook_cb, u);
1154 }
1155
1156 u->sink_unlink_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_UNLINK], PA_HOOK_EARLY, (pa_hook_cb_t) sink_unlink_hook_cb, u);
1157 u->sink_state_changed_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], PA_HOOK_NORMAL, (pa_hook_cb_t) sink_state_changed_hook_cb, u);
1158
1159 if (!(u->thread = pa_thread_new(thread_func, u))) {
1160 pa_log("Failed to create thread.");
1161 goto fail;
1162 }
1163
1164 /* Activate the sink and the sink inputs */
1165 pa_sink_put(u->sink);
1166
1167 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
1168 if (o->sink_input)
1169 pa_sink_input_put(o->sink_input);
1170
1171 if (u->adjust_time > 0)
1172 u->time_event = pa_core_rttime_new(m->core, pa_rtclock_now() + u->adjust_time * PA_USEC_PER_SEC, time_callback, u);
1173
1174 pa_modargs_free(ma);
1175
1176 return 0;
1177
1178 fail:
1179
1180 if (ma)
1181 pa_modargs_free(ma);
1182
1183 pa__done(m);
1184
1185 return -1;
1186 }
1187
1188 static void output_free(struct output *o) {
1189 pa_assert(o);
1190
1191 disable_output(o);
1192
1193 pa_assert_se(pa_idxset_remove_by_data(o->userdata->outputs, o, NULL));
1194
1195 update_description(o->userdata);
1196
1197 if (o->inq_rtpoll_item_read)
1198 pa_rtpoll_item_free(o->inq_rtpoll_item_read);
1199 if (o->inq_rtpoll_item_write)
1200 pa_rtpoll_item_free(o->inq_rtpoll_item_write);
1201
1202 if (o->outq_rtpoll_item_read)
1203 pa_rtpoll_item_free(o->outq_rtpoll_item_read);
1204 if (o->outq_rtpoll_item_write)
1205 pa_rtpoll_item_free(o->outq_rtpoll_item_write);
1206
1207 if (o->inq)
1208 pa_asyncmsgq_unref(o->inq);
1209
1210 if (o->outq)
1211 pa_asyncmsgq_unref(o->outq);
1212
1213 if (o->memblockq)
1214 pa_memblockq_free(o->memblockq);
1215
1216 pa_xfree(o);
1217 }
1218
1219 void pa__done(pa_module*m) {
1220 struct userdata *u;
1221 struct output *o;
1222
1223 pa_assert(m);
1224
1225 if (!(u = m->userdata))
1226 return;
1227
1228 if (u->sink_put_slot)
1229 pa_hook_slot_free(u->sink_put_slot);
1230
1231 if (u->sink_unlink_slot)
1232 pa_hook_slot_free(u->sink_unlink_slot);
1233
1234 if (u->sink_state_changed_slot)
1235 pa_hook_slot_free(u->sink_state_changed_slot);
1236
1237 if (u->outputs) {
1238 while ((o = pa_idxset_first(u->outputs, NULL)))
1239 output_free(o);
1240
1241 pa_idxset_free(u->outputs, NULL, NULL);
1242 }
1243
1244 if (u->sink)
1245 pa_sink_unlink(u->sink);
1246
1247 if (u->thread) {
1248 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1249 pa_thread_free(u->thread);
1250 }
1251
1252 pa_thread_mq_done(&u->thread_mq);
1253
1254 if (u->sink)
1255 pa_sink_unref(u->sink);
1256
1257 if (u->rtpoll)
1258 pa_rtpoll_free(u->rtpoll);
1259
1260 if (u->time_event)
1261 u->core->mainloop->time_free(u->time_event);
1262
1263 if (u->thread_info.smoother)
1264 pa_smoother_free(u->thread_info.smoother);
1265
1266 pa_xfree(u);
1267 }