1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2004-2008 Lennart Poettering
5
6 PulseAudio is free software; you can redistribute it and/or modify
7 it under the terms of the GNU Lesser General Public License as published
8 by the Free Software Foundation; either version 2.1 of the License,
9 or (at your option) any later version.
10
11 PulseAudio is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with PulseAudio; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
19 USA.
20 ***/
21
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <stdio.h>
27 #include <errno.h>
28
29 #include <pulse/rtclock.h>
30 #include <pulse/timeval.h>
31 #include <pulse/xmalloc.h>
32
33 #include <pulsecore/macro.h>
34 #include <pulsecore/module.h>
35 #include <pulsecore/llist.h>
36 #include <pulsecore/sink.h>
37 #include <pulsecore/sink-input.h>
38 #include <pulsecore/memblockq.h>
39 #include <pulsecore/log.h>
40 #include <pulsecore/core-rtclock.h>
41 #include <pulsecore/core-util.h>
42 #include <pulsecore/modargs.h>
43 #include <pulsecore/namereg.h>
44 #include <pulsecore/mutex.h>
45 #include <pulsecore/thread.h>
46 #include <pulsecore/thread-mq.h>
47 #include <pulsecore/rtpoll.h>
48 #include <pulsecore/core-error.h>
49 #include <pulsecore/time-smoother.h>
50
51 #include "module-combine-symdef.h"
52
53 PA_MODULE_AUTHOR("Lennart Poettering");
54 PA_MODULE_DESCRIPTION("Combine multiple sinks to one");
55 PA_MODULE_VERSION(PACKAGE_VERSION);
56 PA_MODULE_LOAD_ONCE(FALSE);
57 PA_MODULE_USAGE(
58 "sink_name=<name for the sink> "
59 "sink_properties=<properties for the sink> "
60 "slaves=<slave sinks> "
61 "adjust_time=<seconds> "
62 "resample_method=<method> "
63 "format=<sample format> "
64 "rate=<sample rate> "
65 "channels=<number of channels> "
66 "channel_map=<channel map>");
67
68 #define DEFAULT_SINK_NAME "combined"
69
70 #define MEMBLOCKQ_MAXLENGTH (1024*1024*16)
71
72 #define DEFAULT_ADJUST_TIME 10
73
74 #define BLOCK_USEC (PA_USEC_PER_MSEC * 200)
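/* BLOCK_USEC doubles as the requested latency of each slave sink input
 * and as the amount of audio rendered ahead of time while in null
 * mode. */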
75
76 static const char* const valid_modargs[] = {
77 "sink_name",
78 "sink_properties",
79 "slaves",
80 "adjust_time",
81 "resample_method",
82 "format",
83 "rate",
84 "channels",
85 "channel_map",
86 NULL
87 };
88
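/* One instance per slave sink: the sink input that feeds the slave, a
 * private memblockq buffering the data rendered for it, and a pair of
 * asyncmsgqs connecting the combine sink's IO thread with the slave
 * sink's IO thread. */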
89 struct output {
90 struct userdata *userdata;
91
92 pa_sink *sink;
93 pa_sink_input *sink_input;
94
95 pa_asyncmsgq *inq, /* Message queue from the sink thread to this sink input */
96 *outq; /* Message queue from this sink input to the sink thread */
97 pa_rtpoll_item *inq_rtpoll_item_read, *inq_rtpoll_item_write;
98 pa_rtpoll_item *outq_rtpoll_item_read, *outq_rtpoll_item_write;
99
100 pa_memblockq *memblockq;
101
102 pa_usec_t total_latency;
103
104 pa_atomic_t max_request;
105
106 PA_LLIST_FIELDS(struct output);
107 };
108
109 struct userdata {
110 pa_core *core;
111 pa_module *module;
112 pa_sink *sink;
113
114 pa_thread *thread;
115 pa_thread_mq thread_mq;
116 pa_rtpoll *rtpoll;
117
118 pa_time_event *time_event;
119 uint32_t adjust_time;
120
121 pa_bool_t automatic;
122
123 pa_hook_slot *sink_put_slot, *sink_unlink_slot, *sink_state_changed_slot;
124
125 pa_resample_method_t resample_method;
126
127 struct timeval adjust_timestamp;
128
129 pa_usec_t block_usec;
130
131 pa_idxset* outputs; /* managed in main context */
132
133 struct {
134 PA_LLIST_HEAD(struct output, active_outputs); /* managed in IO thread context */
135 pa_atomic_t running; /* we cache that value here, so that every thread can query it cheaply */
136 pa_usec_t timestamp;
137 pa_bool_t in_null_mode;
138 pa_smoother *smoother;
139 uint64_t counter;
140 } thread_info;
141 };
142
143 enum {
144 SINK_MESSAGE_ADD_OUTPUT = PA_SINK_MESSAGE_MAX,
145 SINK_MESSAGE_REMOVE_OUTPUT,
146 SINK_MESSAGE_NEED,
147 SINK_MESSAGE_UPDATE_LATENCY,
148 SINK_MESSAGE_UPDATE_MAX_REQUEST
149 };
150
151 enum {
152 SINK_INPUT_MESSAGE_POST = PA_SINK_INPUT_MESSAGE_MAX,
153 };
154
155 static void output_free(struct output *o);
156 static int output_create_sink_input(struct output *o);
157
158 static void adjust_rates(struct userdata *u) {
159 struct output *o;
160 pa_usec_t max_sink_latency = 0, min_total_latency = (pa_usec_t) -1, target_latency, avg_total_latency = 0;
161 uint32_t base_rate;
162 uint32_t idx;
163 unsigned n = 0;
164
165 pa_assert(u);
166 pa_sink_assert_ref(u->sink);
167
168 if (pa_idxset_size(u->outputs) <= 0)
169 return;
170
171 if (!PA_SINK_IS_OPENED(pa_sink_get_state(u->sink)))
172 return;
173
174 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
175 pa_usec_t sink_latency;
176
177 if (!o->sink_input || !PA_SINK_IS_OPENED(pa_sink_get_state(o->sink)))
178 continue;
179
180 o->total_latency = pa_sink_input_get_latency(o->sink_input, &sink_latency);
181 o->total_latency += sink_latency;
182
183 if (sink_latency > max_sink_latency)
184 max_sink_latency = sink_latency;
185
186 if (min_total_latency == (pa_usec_t) -1 || o->total_latency < min_total_latency)
187 min_total_latency = o->total_latency;
188
189 avg_total_latency += o->total_latency;
190 n++;
191 }
192
193 if (min_total_latency == (pa_usec_t) -1)
194 return;
195
196 avg_total_latency /= n;
197
198 target_latency = max_sink_latency > min_total_latency ? max_sink_latency : min_total_latency;
199
200 pa_log_info("[%s] avg total latency is %0.2f msec.", u->sink->name, (double) avg_total_latency / PA_USEC_PER_MSEC);
201 pa_log_info("[%s] target latency is %0.2f msec.", u->sink->name, (double) target_latency / PA_USEC_PER_MSEC);
202
203 base_rate = u->sink->sample_spec.rate;
204
205 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
206 uint32_t r = base_rate;
207
208 if (!o->sink_input || !PA_SINK_IS_OPENED(pa_sink_get_state(o->sink)))
209 continue;
210
211 if (o->total_latency < target_latency)
212 r -= (uint32_t) ((((double) (target_latency - o->total_latency))/(double)u->adjust_time)*(double)r/PA_USEC_PER_SEC);
213 else if (o->total_latency > target_latency)
214 r += (uint32_t) ((((double) (o->total_latency - target_latency))/(double)u->adjust_time)*(double)r/PA_USEC_PER_SEC);
215
216 if (r < (uint32_t) (base_rate*0.9) || r > (uint32_t) (base_rate*1.1)) {
217 pa_log_warn("[%s] sample rates too different, not adjusting (%u vs. %u).", pa_proplist_gets(o->sink_input->proplist, PA_PROP_MEDIA_NAME), base_rate, r);
218 pa_sink_input_set_rate(o->sink_input, base_rate);
219 } else {
220 pa_log_info("[%s] new rate is %u Hz; ratio is %0.3f; latency is %0.0f usec.", pa_proplist_gets(o->sink_input->proplist, PA_PROP_MEDIA_NAME), r, (double) r / base_rate, (float) o->total_latency);
221 pa_sink_input_set_rate(o->sink_input, r);
222 }
223 }
224
225 pa_asyncmsgq_send(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_UPDATE_LATENCY, NULL, (int64_t) avg_total_latency, NULL);
226 }
227
228 static void time_callback(pa_mainloop_api *a, pa_time_event *e, const struct timeval *t, void *userdata) {
229 struct userdata *u = userdata;
230
231 pa_assert(u);
232 pa_assert(a);
233 pa_assert(u->time_event == e);
234
235 adjust_rates(u);
236
237 pa_core_rttime_restart(u->core, e, pa_rtclock_now() + u->adjust_time * PA_USEC_PER_SEC);
238 }
239
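/* Render audio and throw it away: used while no slave outputs are
 * active so that clients connected to the combined sink keep playing
 * at the right pace. The timestamp tracks how far ahead we have
 * rendered; the smoother is fed from it so latency queries stay
 * meaningful. */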
240 static void process_render_null(struct userdata *u, pa_usec_t now) {
241 size_t ate = 0;
242 pa_assert(u);
243
244 if (u->thread_info.in_null_mode)
245 u->thread_info.timestamp = now;
246
247 while (u->thread_info.timestamp < now + u->block_usec) {
248 pa_memchunk chunk;
249
250 pa_sink_render(u->sink, u->sink->thread_info.max_request, &chunk);
251 pa_memblock_unref(chunk.memblock);
252
253 u->thread_info.counter += chunk.length;
254
255 /* pa_log_debug("Ate %lu bytes.", (unsigned long) chunk.length); */
256 u->thread_info.timestamp += pa_bytes_to_usec(chunk.length, &u->sink->sample_spec);
257
258 ate += chunk.length;
259
260 if (ate >= u->sink->thread_info.max_request)
261 break;
262 }
263
264 /* pa_log_debug("Ate in sum %lu bytes (of %lu)", (unsigned long) ate, (unsigned long) nbytes); */
265
266 pa_smoother_put(u->thread_info.smoother, now,
267 pa_bytes_to_usec(u->thread_info.counter, &u->sink->sample_spec) - (u->thread_info.timestamp - now));
268 }
269
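/* IO thread of the combined sink: handle rewind requests, run the
 * null-mode renderer while no outputs are active, and otherwise sleep
 * in pa_rtpoll_run() until a message arrives. */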
270 static void thread_func(void *userdata) {
271 struct userdata *u = userdata;
272
273 pa_assert(u);
274
275 pa_log_debug("Thread starting up");
276
277 if (u->core->realtime_scheduling)
278 pa_make_realtime(u->core->realtime_priority+1);
279
280 pa_thread_mq_install(&u->thread_mq);
281
282 u->thread_info.timestamp = pa_rtclock_now();
283 u->thread_info.in_null_mode = FALSE;
284
285 for (;;) {
286 int ret;
287
288 if (PA_SINK_IS_OPENED(u->sink->thread_info.state))
289 if (u->sink->thread_info.rewind_requested)
290 pa_sink_process_rewind(u->sink, 0);
291
292 /* If no outputs are connected, render some data and drop it immediately. */
293 if (PA_SINK_IS_OPENED(u->sink->thread_info.state) && !u->thread_info.active_outputs) {
294 pa_usec_t now;
295
296 now = pa_rtclock_now();
297
298 if (!u->thread_info.in_null_mode || u->thread_info.timestamp <= now)
299 process_render_null(u, now);
300
301 pa_rtpoll_set_timer_absolute(u->rtpoll, u->thread_info.timestamp);
302 u->thread_info.in_null_mode = TRUE;
303 } else {
304 pa_rtpoll_set_timer_disabled(u->rtpoll);
305 u->thread_info.in_null_mode = FALSE;
306 }
307
308 /* Hmm, nothing to do. Let's sleep */
309 if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0) {
310 pa_log_info("pa_rtpoll_run() = %i", ret);
311 goto fail;
312 }
313
314 if (ret == 0)
315 goto finish;
316 }
317
318 fail:
319 /* If this was not a regular exit from the loop, we have to continue
320 * processing messages until we receive PA_MESSAGE_SHUTDOWN */
321 pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
322 pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
323
324 finish:
325 pa_log_debug("Thread shutting down");
326 }
327
328 /* Called from I/O thread context */
329 static void render_memblock(struct userdata *u, struct output *o, size_t length) {
330 pa_assert(u);
331 pa_assert(o);
332
333 /* We are run by the sink thread, on behalf of an output (o). The
334 * output is waiting for us, hence it is safe to access its
335 * memblockq and asyncmsgq directly. */
336
337 /* If we are not running, we cannot produce any data */
338 if (!pa_atomic_load(&u->thread_info.running))
339 return;
340
341 /* Maybe there's some data in the requesting output's queue
342 * now? */
343 while (pa_asyncmsgq_process_one(o->inq) > 0)
344 ;
345
346 /* Ok, now let's prepare some data if we really have to */
347 while (!pa_memblockq_is_readable(o->memblockq)) {
348 struct output *j;
349 pa_memchunk chunk;
350
351 /* Render data! */
352 pa_sink_render(u->sink, length, &chunk);
353
354 u->thread_info.counter += chunk.length;
355
356 /* OK, let's send this data to the other threads */
357 for (j = u->thread_info.active_outputs; j; j = j->next)
358
359 /* Send to all other outputs, i.e. everyone except the
360 * requesting one */
361
362 if (j != o)
363 pa_asyncmsgq_post(j->inq, PA_MSGOBJECT(j->sink_input), SINK_INPUT_MESSAGE_POST, NULL, 0, &chunk, NULL);
364
365 /* And place it directly into the requesting output's queue */
366 if (o)
367 pa_memblockq_push_align(o->memblockq, &chunk);
368
369 pa_memblock_unref(chunk.memblock);
370 }
371 }
372
373 /* Called from I/O thread context */
374 static void request_memblock(struct output *o, size_t length) {
375 pa_assert(o);
376 pa_sink_input_assert_ref(o->sink_input);
377 pa_sink_assert_ref(o->userdata->sink);
378
379 /* If another thread has already prepared some data, we
380 * received it over the asyncmsgq, hence let's process
381 * it first. */
382 while (pa_asyncmsgq_process_one(o->inq) > 0)
383 ;
384
385 /* Check whether we're now readable */
386 if (pa_memblockq_is_readable(o->memblockq))
387 return;
388
389 /* OK, we need to prepare new data, but only if the sink is actually running */
390 if (pa_atomic_load(&o->userdata->thread_info.running))
391 pa_asyncmsgq_send(o->outq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_NEED, o, (int64_t) length, NULL);
392 }
393
394 /* Called from I/O thread context */
395 static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
396 struct output *o;
397
398 pa_sink_input_assert_ref(i);
399 pa_assert_se(o = i->userdata);
400
401 /* If necessary, get some new data */
402 request_memblock(o, nbytes);
403
404 if (pa_memblockq_peek(o->memblockq, chunk) < 0)
405 return -1;
406
407 pa_memblockq_drop(o->memblockq, chunk->length);
408 return 0;
409 }
410
411 /* Called from I/O thread context */
412 static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
413 struct output *o;
414
415 pa_sink_input_assert_ref(i);
416 pa_assert_se(o = i->userdata);
417
418 pa_memblockq_rewind(o->memblockq, nbytes);
419 }
420
421 /* Called from I/O thread context */
422 static void sink_input_update_max_rewind_cb(pa_sink_input *i, size_t nbytes) {
423 struct output *o;
424
425 pa_sink_input_assert_ref(i);
426 pa_assert_se(o = i->userdata);
427
428 pa_memblockq_set_maxrewind(o->memblockq, nbytes);
429 }
430
431 /* Called from I/O thread context */
432 static void sink_input_update_max_request_cb(pa_sink_input *i, size_t nbytes) {
433 struct output *o;
434
435 pa_sink_input_assert_ref(i);
436 pa_assert_se(o = i->userdata);
437
438 if (pa_atomic_load(&o->max_request) == (int) nbytes)
439 return;
440
441 pa_atomic_store(&o->max_request, (int) nbytes);
442
443 pa_asyncmsgq_post(o->outq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_UPDATE_MAX_REQUEST, NULL, 0, NULL, NULL);
444 }
445
446 /* Called from I/O thread context */
447 static void sink_input_attach_cb(pa_sink_input *i) {
448 struct output *o;
449
450 pa_sink_input_assert_ref(i);
451 pa_assert_se(o = i->userdata);
452
453 /* Set up the queue from the sink thread to us */
454 pa_assert(!o->inq_rtpoll_item_read && !o->outq_rtpoll_item_write);
455
456 o->inq_rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
457 i->sink->rtpoll,
458 PA_RTPOLL_LATE, /* This one is not that important, since we check for data in _peek() anyway. */
459 o->inq);
460
461 o->outq_rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
462 i->sink->rtpoll,
463 PA_RTPOLL_EARLY,
464 o->outq);
465 }
466
467 /* Called from I/O thread context */
468 static void sink_input_detach_cb(pa_sink_input *i) {
469 struct output *o;
470
471 pa_sink_input_assert_ref(i);
472 pa_assert_se(o = i->userdata);
473
474 /* Shut down the queue from the sink thread to us */
475 pa_assert(o->inq_rtpoll_item_read && o->outq_rtpoll_item_write);
476
477 pa_rtpoll_item_free(o->inq_rtpoll_item_read);
478 o->inq_rtpoll_item_read = NULL;
479
480 pa_rtpoll_item_free(o->outq_rtpoll_item_write);
481 o->outq_rtpoll_item_write = NULL;
482 }
483
484 /* Called from main context */
485 static void sink_input_kill_cb(pa_sink_input *i) {
486 struct output *o;
487
488 pa_sink_input_assert_ref(i);
489 pa_assert_se(o = i->userdata);
490
491 pa_module_unload_request(o->userdata->module, TRUE);
492 output_free(o);
493 }
494
495 /* Called from IO thread context */
496 static void sink_input_state_change_cb(pa_sink_input *i, pa_sink_input_state_t state) {
497 struct userdata *u;
498
499 pa_sink_input_assert_ref(i);
500 pa_assert_se(u = i->userdata);
501
502 /* If we are being added for the first time, ask for a rewind so
503 * that we are heard right away. */
504 if (PA_SINK_INPUT_IS_LINKED(state) &&
505 i->thread_info.state == PA_SINK_INPUT_INIT)
506 pa_sink_input_request_rewind(i, 0, FALSE, TRUE, TRUE);
507 }
508
509 /* Called from thread context */
510 static int sink_input_process_msg(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
511 struct output *o = PA_SINK_INPUT(obj)->userdata;
512
513 switch (code) {
514
515 case PA_SINK_INPUT_MESSAGE_GET_LATENCY: {
516 pa_usec_t *r = data;
517
518 *r = pa_bytes_to_usec(pa_memblockq_get_length(o->memblockq), &o->sink_input->sample_spec);
519
520 /* Fall through; the default handler will add in the extra
521 * latency introduced by the resampler */
522 break;
523 }
524
525 case SINK_INPUT_MESSAGE_POST:
526
527 if (PA_SINK_IS_OPENED(o->sink_input->sink->thread_info.state))
528 pa_memblockq_push_align(o->memblockq, chunk);
529 else
530 pa_memblockq_flush_write(o->memblockq);
531
532 return 0;
533 }
534
535 return pa_sink_input_process_msg(obj, code, data, offset, chunk);
536 }
537
538 /* Called from main context */
539 static void disable_output(struct output *o) {
540 pa_assert(o);
541
542 if (!o->sink_input)
543 return;
544
545 pa_sink_input_unlink(o->sink_input);
546 pa_asyncmsgq_send(o->userdata->sink->asyncmsgq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_REMOVE_OUTPUT, o, 0, NULL);
547 pa_sink_input_unref(o->sink_input);
548 o->sink_input = NULL;
549 }
550
551 /* Called from main context */
552 static void enable_output(struct output *o) {
553 pa_assert(o);
554
555 if (o->sink_input)
556 return;
557
558 if (output_create_sink_input(o) >= 0) {
559
560 pa_memblockq_flush_write(o->memblockq);
561
562 pa_sink_input_put(o->sink_input);
563
564 if (o->userdata->sink && PA_SINK_IS_LINKED(pa_sink_get_state(o->userdata->sink)))
565 pa_asyncmsgq_send(o->userdata->sink->asyncmsgq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_ADD_OUTPUT, o, 0, NULL);
566 }
567 }
568
569 /* Called from main context */
570 static void suspend(struct userdata *u) {
571 struct output *o;
572 uint32_t idx;
573
574 pa_assert(u);
575
576 /* Let's suspend by unlinking all streams */
577 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
578 disable_output(o);
579
580 pa_log_info("Device suspended...");
581 }
582
583 /* Called from main context */
584 static void unsuspend(struct userdata *u) {
585 struct output *o;
586 uint32_t idx;
587
588 pa_assert(u);
589
590 /* Let's resume */
591 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
592
593 pa_sink_suspend(o->sink, FALSE, PA_SUSPEND_IDLE);
594
595 if (PA_SINK_IS_OPENED(pa_sink_get_state(o->sink)))
596 enable_output(o);
597 }
598
599 pa_log_info("Resumed successfully...");
600 }
601
602 /* Called from main context */
603 static int sink_set_state(pa_sink *sink, pa_sink_state_t state) {
604 struct userdata *u;
605
606 pa_sink_assert_ref(sink);
607 pa_assert_se(u = sink->userdata);
608
609 /* Please note that in contrast to the ALSA modules we call
610 * suspend/unsuspend from main context here! */
611
612 switch (state) {
613 case PA_SINK_SUSPENDED:
614 pa_assert(PA_SINK_IS_OPENED(pa_sink_get_state(u->sink)));
615
616 suspend(u);
617 break;
618
619 case PA_SINK_IDLE:
620 case PA_SINK_RUNNING:
621
622 if (pa_sink_get_state(u->sink) == PA_SINK_SUSPENDED)
623 unsuspend(u);
624
625 break;
626
627 case PA_SINK_UNLINKED:
628 case PA_SINK_INIT:
629 case PA_SINK_INVALID_STATE:
630 ;
631 }
632
633 return 0;
634 }
635
636 /* Called from IO context */
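/* The combined sink advertises the largest max_request of all active
 * outputs; if none reports one yet, fall back to block_usec worth of
 * bytes. */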
637 static void update_max_request(struct userdata *u) {
638 size_t max_request = 0;
639 struct output *o;
640
641 for (o = u->thread_info.active_outputs; o; o = o->next) {
642 size_t mr = (size_t) pa_atomic_load(&o->max_request);
643
644 if (mr > max_request)
645 max_request = mr;
646 }
647
648 if (max_request <= 0)
649 max_request = pa_usec_to_bytes(u->block_usec, &u->sink->sample_spec);
650
651 pa_sink_set_max_request_within_thread(u->sink, max_request);
652 }
653
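/* Message handler of the combined sink proper: tracks the running
 * state for the other threads, answers latency queries via the
 * smoother, and wires outputs in and out of the IO thread's rtpoll. */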
654 /* Called from the IO thread context */
655 static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
656 struct userdata *u = PA_SINK(o)->userdata;
657
658 switch (code) {
659
660 case PA_SINK_MESSAGE_SET_STATE:
661 pa_atomic_store(&u->thread_info.running, PA_PTR_TO_UINT(data) == PA_SINK_RUNNING);
662
663 if (PA_PTR_TO_UINT(data) == PA_SINK_SUSPENDED)
664 pa_smoother_pause(u->thread_info.smoother, pa_rtclock_now());
665 else
666 pa_smoother_resume(u->thread_info.smoother, pa_rtclock_now(), TRUE);
667
668 break;
669
670 case PA_SINK_MESSAGE_GET_LATENCY: {
671 pa_usec_t x, y, c, *delay = data;
672
673 x = pa_rtclock_now();
674 y = pa_smoother_get(u->thread_info.smoother, x);
675
676 c = pa_bytes_to_usec(u->thread_info.counter, &u->sink->sample_spec);
677
678 if (y < c)
679 *delay = c - y;
680 else
681 *delay = 0;
682
683 return 0;
684 }
685
686 case SINK_MESSAGE_ADD_OUTPUT: {
687 struct output *op = data;
688
689 PA_LLIST_PREPEND(struct output, u->thread_info.active_outputs, op);
690
691 pa_assert(!op->outq_rtpoll_item_read && !op->inq_rtpoll_item_write);
692
693 op->outq_rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
694 u->rtpoll,
695 PA_RTPOLL_EARLY-1, /* This item is very important */
696 op->outq);
697 op->inq_rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
698 u->rtpoll,
699 PA_RTPOLL_EARLY,
700 op->inq);
701
702 update_max_request(u);
703 return 0;
704 }
705
706 case SINK_MESSAGE_REMOVE_OUTPUT: {
707 struct output *op = data;
708
709 PA_LLIST_REMOVE(struct output, u->thread_info.active_outputs, op);
710
711 pa_assert(op->outq_rtpoll_item_read && op->inq_rtpoll_item_write);
712
713 pa_rtpoll_item_free(op->outq_rtpoll_item_read);
714 op->outq_rtpoll_item_read = NULL;
715
716 pa_rtpoll_item_free(op->inq_rtpoll_item_write);
717 op->inq_rtpoll_item_write = NULL;
718
719 update_max_request(u);
720 return 0;
721 }
722
723 case SINK_MESSAGE_NEED:
724 render_memblock(u, (struct output*) data, (size_t) offset);
725 return 0;
726
727 case SINK_MESSAGE_UPDATE_LATENCY: {
728 pa_usec_t x, y, latency = (pa_usec_t) offset;
729
730 x = pa_rtclock_now();
731 y = pa_bytes_to_usec(u->thread_info.counter, &u->sink->sample_spec);
732
733 if (y > latency)
734 y -= latency;
735 else
736 y = 0;
737
738 pa_smoother_put(u->thread_info.smoother, x, y);
739 return 0;
740 }
741
742 case SINK_MESSAGE_UPDATE_MAX_REQUEST:
743
744 update_max_request(u);
745 break;
746 }
747
748 return pa_sink_process_msg(o, code, data, offset, chunk);
749 }
750
751 static void update_description(struct userdata *u) {
752 pa_bool_t first = TRUE;
753 char *t;
754 struct output *o;
755 uint32_t idx;
756
757 pa_assert(u);
758
759 if (pa_idxset_isempty(u->outputs)) {
760 pa_sink_set_description(u->sink, "Simultaneous output");
761 return;
762 }
763
764 t = pa_xstrdup("Simultaneous output to");
765
766 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
767 char *e;
768
769 if (first) {
770 e = pa_sprintf_malloc("%s %s", t, pa_strnull(pa_proplist_gets(o->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)));
771 first = FALSE;
772 } else
773 e = pa_sprintf_malloc("%s, %s", t, pa_strnull(pa_proplist_gets(o->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)));
774
775 pa_xfree(t);
776 t = e;
777 }
778
779 pa_sink_set_description(u->sink, t);
780 pa_xfree(t);
781 }
782
783 static int output_create_sink_input(struct output *o) {
784 pa_sink_input_new_data data;
785
786 pa_assert(o);
787
788 if (o->sink_input)
789 return 0;
790
791 pa_sink_input_new_data_init(&data);
792 data.sink = o->sink;
793 data.driver = __FILE__;
794 pa_proplist_setf(data.proplist, PA_PROP_MEDIA_NAME, "Simultaneous output on %s", pa_strnull(pa_proplist_gets(o->sink->proplist, PA_PROP_DEVICE_DESCRIPTION)));
795 pa_proplist_sets(data.proplist, PA_PROP_MEDIA_ROLE, "filter");
796 pa_sink_input_new_data_set_sample_spec(&data, &o->userdata->sink->sample_spec);
797 pa_sink_input_new_data_set_channel_map(&data, &o->userdata->sink->channel_map);
798 data.module = o->userdata->module;
799 data.resample_method = o->userdata->resample_method;
800
801 pa_sink_input_new(&o->sink_input, o->userdata->core, &data, PA_SINK_INPUT_VARIABLE_RATE|PA_SINK_INPUT_DONT_MOVE);
802
803 pa_sink_input_new_data_done(&data);
804
805 if (!o->sink_input)
806 return -1;
807
808 o->sink_input->parent.process_msg = sink_input_process_msg;
809 o->sink_input->pop = sink_input_pop_cb;
810 o->sink_input->process_rewind = sink_input_process_rewind_cb;
811 o->sink_input->state_change = sink_input_state_change_cb;
812 o->sink_input->update_max_rewind = sink_input_update_max_rewind_cb;
813 o->sink_input->update_max_request = sink_input_update_max_request_cb;
814 o->sink_input->attach = sink_input_attach_cb;
815 o->sink_input->detach = sink_input_detach_cb;
816 o->sink_input->kill = sink_input_kill_cb;
817 o->sink_input->userdata = o;
818
819 pa_sink_input_set_requested_latency(o->sink_input, BLOCK_USEC);
820
821 return 0;
822 }
823
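/* Allocate the per-slave state and register it with the combine sink:
 * if the sink is already running this goes through a
 * SINK_MESSAGE_ADD_OUTPUT round trip to the IO thread, otherwise the
 * rtpoll items are set up directly since the thread is not running
 * yet. */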
824 static struct output *output_new(struct userdata *u, pa_sink *sink) {
825 struct output *o;
826 pa_sink_state_t state;
827
828 pa_assert(u);
829 pa_assert(sink);
830 pa_assert(u->sink);
831
832 o = pa_xnew(struct output, 1);
833 o->userdata = u;
834 o->inq = pa_asyncmsgq_new(0);
835 o->outq = pa_asyncmsgq_new(0);
836 o->inq_rtpoll_item_write = o->inq_rtpoll_item_read = NULL;
837 o->outq_rtpoll_item_write = o->outq_rtpoll_item_read = NULL;
838 o->sink = sink;
839 o->sink_input = NULL;
840 o->memblockq = pa_memblockq_new(
841 0,
842 MEMBLOCKQ_MAXLENGTH,
843 MEMBLOCKQ_MAXLENGTH,
844 pa_frame_size(&u->sink->sample_spec),
845 1,
846 0,
847 0,
848 NULL);
849 pa_atomic_store(&o->max_request, 0);
850 PA_LLIST_INIT(struct output, o);
851
852 pa_assert_se(pa_idxset_put(u->outputs, o, NULL) == 0);
853
854 state = pa_sink_get_state(u->sink);
855
856 if (state != PA_SINK_INIT)
857 pa_asyncmsgq_send(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_ADD_OUTPUT, o, 0, NULL);
858 else {
859 /* If the sink is not yet started, we need to do the activation ourselves */
860 PA_LLIST_PREPEND(struct output, u->thread_info.active_outputs, o);
861
862 o->outq_rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
863 u->rtpoll,
864 PA_RTPOLL_EARLY-1, /* This item is very important */
865 o->outq);
866 o->inq_rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
867 u->rtpoll,
868 PA_RTPOLL_EARLY,
869 o->inq);
870 }
871
872 if (PA_SINK_IS_OPENED(state) || state == PA_SINK_INIT) {
873 pa_sink_suspend(sink, FALSE, PA_SUSPEND_IDLE);
874
875 if (PA_SINK_IS_OPENED(pa_sink_get_state(sink)))
876 if (output_create_sink_input(o) < 0)
877 goto fail;
878 }
879
880 update_description(u);
881
882 return o;
883
884 fail:
885
886 if (o) {
887 pa_idxset_remove_by_data(u->outputs, o, NULL);
888
889 if (o->sink_input) {
890 pa_sink_input_unlink(o->sink_input);
891 pa_sink_input_unref(o->sink_input);
892 }
893
894 if (o->memblockq)
895 pa_memblockq_free(o->memblockq);
896
897 if (o->inq)
898 pa_asyncmsgq_unref(o->inq);
899
900 if (o->outq)
901 pa_asyncmsgq_unref(o->outq);
902
903 pa_xfree(o);
904 }
905
906 return NULL;
907 }
908
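/* In automatic mode only real hardware sinks are combined; our own
 * sink and sinks whose device class is not "sound" are skipped. */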
909 static pa_bool_t is_suitable_sink(struct userdata *u, pa_sink *s) {
910 const char *t;
911
912 pa_sink_assert_ref(s);
913
914 if (!(s->flags & PA_SINK_HARDWARE))
915 return FALSE;
916
917 if (s == u->sink)
918 return FALSE;
919
920 if ((t = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_CLASS)))
921 if (strcmp(t, "sound"))
922 return FALSE;
923
924 return TRUE;
925 }
926
927 static pa_hook_result_t sink_put_hook_cb(pa_core *c, pa_sink *s, struct userdata* u) {
928 struct output *o;
929
930 pa_core_assert_ref(c);
931 pa_sink_assert_ref(s);
932 pa_assert(u);
933 pa_assert(u->automatic);
934
935 if (!is_suitable_sink(u, s))
936 return PA_HOOK_OK;
937
938 pa_log_info("Configuring new sink: %s", s->name);
939
940 if (!(o = output_new(u, s))) {
941 pa_log("Failed to create sink input on sink '%s'.", s->name);
942 return PA_HOOK_OK;
943 }
944
945 if (o->sink_input)
946 pa_sink_input_put(o->sink_input);
947
948 return PA_HOOK_OK;
949 }
950
951 static struct output* find_output(struct userdata *u, pa_sink *s) {
952 struct output *o;
953 uint32_t idx;
954
955 pa_assert(u);
956 pa_assert(s);
957
958 if (u->sink == s)
959 return NULL;
960
961 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
962 if (o->sink == s)
963 return o;
964
965 return NULL;
966 }
967
968 static pa_hook_result_t sink_unlink_hook_cb(pa_core *c, pa_sink *s, struct userdata* u) {
969 struct output *o;
970
971 pa_assert(c);
972 pa_sink_assert_ref(s);
973 pa_assert(u);
974
975 if (!(o = find_output(u, s)))
976 return PA_HOOK_OK;
977
978 pa_log_info("Unconfiguring sink: %s", s->name);
979
980 output_free(o);
981
982 return PA_HOOK_OK;
983 }
984
985 static pa_hook_result_t sink_state_changed_hook_cb(pa_core *c, pa_sink *s, struct userdata* u) {
986 struct output *o;
987 pa_sink_state_t state;
988
989 if (!(o = find_output(u, s)))
990 return PA_HOOK_OK;
991
992 state = pa_sink_get_state(s);
993
994 if (PA_SINK_IS_OPENED(state) && PA_SINK_IS_OPENED(pa_sink_get_state(u->sink)) && !o->sink_input)
995 enable_output(o);
996
997 if (state == PA_SINK_SUSPENDED && o->sink_input)
998 disable_output(o);
999
1000 return PA_HOOK_OK;
1001 }
1002
1003 int pa__init(pa_module*m) {
1004 struct userdata *u;
1005 pa_modargs *ma = NULL;
1006 const char *slaves, *rm;
1007 int resample_method = PA_RESAMPLER_TRIVIAL;
1008 pa_sample_spec ss;
1009 pa_channel_map map;
1010 struct output *o;
1011 uint32_t idx;
1012 pa_sink_new_data data;
1013
1014 pa_assert(m);
1015
1016 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1017 pa_log("failed to parse module arguments");
1018 goto fail;
1019 }
1020
1021 if ((rm = pa_modargs_get_value(ma, "resample_method", NULL))) {
1022 if ((resample_method = pa_parse_resample_method(rm)) < 0) {
1023 pa_log("invalid resample method '%s'", rm);
1024 goto fail;
1025 }
1026 }
1027
1028 m->userdata = u = pa_xnew(struct userdata, 1);
1029 u->core = m->core;
1030 u->module = m;
1031 u->sink = NULL;
1032 u->time_event = NULL;
1033 u->adjust_time = DEFAULT_ADJUST_TIME;
1034 u->rtpoll = pa_rtpoll_new();
1035 pa_thread_mq_init(&u->thread_mq, m->core->mainloop, u->rtpoll);
1036 u->thread = NULL;
1037 u->resample_method = resample_method;
1038 u->outputs = pa_idxset_new(NULL, NULL);
1039 memset(&u->adjust_timestamp, 0, sizeof(u->adjust_timestamp));
1040 u->sink_put_slot = u->sink_unlink_slot = u->sink_state_changed_slot = NULL;
1041 PA_LLIST_HEAD_INIT(struct output, u->thread_info.active_outputs);
1042 pa_atomic_store(&u->thread_info.running, FALSE);
1043 u->thread_info.in_null_mode = FALSE;
1044 u->thread_info.counter = 0;
1045 u->thread_info.smoother = pa_smoother_new(
1046 PA_USEC_PER_SEC,
1047 PA_USEC_PER_SEC*2,
1048 TRUE,
1049 TRUE,
1050 10,
1051 0,
1052 FALSE);
1053
1054 if (pa_modargs_get_value_u32(ma, "adjust_time", &u->adjust_time) < 0) {
1055 pa_log("Failed to parse adjust_time value");
1056 goto fail;
1057 }
1058
1059 slaves = pa_modargs_get_value(ma, "slaves", NULL);
1060 u->automatic = !slaves;
1061
1062 ss = m->core->default_sample_spec;
1063 map = m->core->default_channel_map;
1064 if ((pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_DEFAULT) < 0)) {
1065 pa_log("Invalid sample specification.");
1066 goto fail;
1067 }
1068
1069 pa_sink_new_data_init(&data);
1070 data.namereg_fail = FALSE;
1071 data.driver = __FILE__;
1072 data.module = m;
1073 pa_sink_new_data_set_name(&data, pa_modargs_get_value(ma, "sink_name", DEFAULT_SINK_NAME));
1074 pa_sink_new_data_set_sample_spec(&data, &ss);
1075 pa_sink_new_data_set_channel_map(&data, &map);
1076 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Simultaneous Output");
1077 pa_proplist_sets(data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1078
1079 if (slaves)
1080 pa_proplist_sets(data.proplist, "combine.slaves", slaves);
1081
1082 if (pa_modargs_get_proplist(ma, "sink_properties", data.proplist, PA_UPDATE_REPLACE) < 0) {
1083 pa_log("Invalid properties");
1084 pa_sink_new_data_done(&data);
1085 goto fail;
1086 }
1087
1088 u->sink = pa_sink_new(m->core, &data, PA_SINK_LATENCY);
1089 pa_sink_new_data_done(&data);
1090
1091 if (!u->sink) {
1092 pa_log("Failed to create sink");
1093 goto fail;
1094 }
1095
1096 u->sink->parent.process_msg = sink_process_msg;
1097 u->sink->set_state = sink_set_state;
1098 u->sink->userdata = u;
1099
1100 pa_sink_set_rtpoll(u->sink, u->rtpoll);
1101 pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
1102
1103 u->block_usec = BLOCK_USEC;
1104 pa_sink_set_max_request(u->sink, pa_usec_to_bytes(u->block_usec, &u->sink->sample_spec));
1105
1106 if (!u->automatic) {
1107 const char*split_state;
1108 char *n = NULL;
1109 pa_assert(slaves);
1110
1111 /* The slaves have been specified manually */
1112
1113 split_state = NULL;
1114 while ((n = pa_split(slaves, ",", &split_state))) {
1115 pa_sink *slave_sink;
1116
1117 if (!(slave_sink = pa_namereg_get(m->core, n, PA_NAMEREG_SINK)) || slave_sink == u->sink) {
1118 pa_log("Invalid slave sink '%s'", n);
1119 pa_xfree(n);
1120 goto fail;
1121 }
1122
1123 pa_xfree(n);
1124
1125 if (!output_new(u, slave_sink)) {
1126 pa_log("Failed to create slave sink input on sink '%s'.", slave_sink->name);
1127 goto fail;
1128 }
1129 }
1130
1131 if (pa_idxset_size(u->outputs) <= 1)
1132 pa_log_warn("Fewer than two slave sinks specified.");
1133
1134 u->sink_put_slot = NULL;
1135
1136 } else {
1137 pa_sink *s;
1138
1139 /* We're in automatic mode, so we add every sink that matches our needs */
1140
1141 for (s = pa_idxset_first(m->core->sinks, &idx); s; s = pa_idxset_next(m->core->sinks, &idx)) {
1142
1143 if (!is_suitable_sink(u, s))
1144 continue;
1145
1146 if (!output_new(u, s)) {
1147 pa_log("Failed to create sink input on sink '%s'.", s->name);
1148 goto fail;
1149 }
1150 }
1151
1152 u->sink_put_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_PUT], PA_HOOK_LATE, (pa_hook_cb_t) sink_put_hook_cb, u);
1153 }
1154
1155 u->sink_unlink_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_UNLINK], PA_HOOK_EARLY, (pa_hook_cb_t) sink_unlink_hook_cb, u);
1156 u->sink_state_changed_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], PA_HOOK_NORMAL, (pa_hook_cb_t) sink_state_changed_hook_cb, u);
1157
1158 if (!(u->thread = pa_thread_new(thread_func, u))) {
1159 pa_log("Failed to create thread.");
1160 goto fail;
1161 }
1162
1163 /* Activate the sink and the sink inputs */
1164 pa_sink_put(u->sink);
1165
1166 for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
1167 if (o->sink_input)
1168 pa_sink_input_put(o->sink_input);
1169
1170 if (u->adjust_time > 0)
1171 u->time_event = pa_core_rttime_new(m->core, pa_rtclock_now() + u->adjust_time * PA_USEC_PER_SEC, time_callback, u);
1172
1173 pa_modargs_free(ma);
1174
1175 return 0;
1176
1177 fail:
1178
1179 if (ma)
1180 pa_modargs_free(ma);
1181
1182 pa__done(m);
1183
1184 return -1;
1185 }
1186
1187 static void output_free(struct output *o) {
1188 pa_assert(o);
1189
1190 disable_output(o);
1191
1192 pa_assert_se(pa_idxset_remove_by_data(o->userdata->outputs, o, NULL));
1193
1194 update_description(o->userdata);
1195
1196 if (o->inq_rtpoll_item_read)
1197 pa_rtpoll_item_free(o->inq_rtpoll_item_read);
1198 if (o->inq_rtpoll_item_write)
1199 pa_rtpoll_item_free(o->inq_rtpoll_item_write);
1200
1201 if (o->outq_rtpoll_item_read)
1202 pa_rtpoll_item_free(o->outq_rtpoll_item_read);
1203 if (o->outq_rtpoll_item_write)
1204 pa_rtpoll_item_free(o->outq_rtpoll_item_write);
1205
1206 if (o->inq)
1207 pa_asyncmsgq_unref(o->inq);
1208
1209 if (o->outq)
1210 pa_asyncmsgq_unref(o->outq);
1211
1212 if (o->memblockq)
1213 pa_memblockq_free(o->memblockq);
1214
1215 pa_xfree(o);
1216 }
1217
1218 void pa__done(pa_module*m) {
1219 struct userdata *u;
1220 struct output *o;
1221
1222 pa_assert(m);
1223
1224 if (!(u = m->userdata))
1225 return;
1226
1227 if (u->sink_put_slot)
1228 pa_hook_slot_free(u->sink_put_slot);
1229
1230 if (u->sink_unlink_slot)
1231 pa_hook_slot_free(u->sink_unlink_slot);
1232
1233 if (u->sink_state_changed_slot)
1234 pa_hook_slot_free(u->sink_state_changed_slot);
1235
1236 if (u->outputs) {
1237 while ((o = pa_idxset_first(u->outputs, NULL)))
1238 output_free(o);
1239
1240 pa_idxset_free(u->outputs, NULL, NULL);
1241 }
1242
1243 if (u->sink)
1244 pa_sink_unlink(u->sink);
1245
1246 if (u->thread) {
1247 pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
1248 pa_thread_free(u->thread);
1249 }
1250
1251 pa_thread_mq_done(&u->thread_mq);
1252
1253 if (u->sink)
1254 pa_sink_unref(u->sink);
1255
1256 if (u->rtpoll)
1257 pa_rtpoll_free(u->rtpoll);
1258
1259 if (u->time_event)
1260 u->core->mainloop->time_free(u->time_event);
1261
1262 if (u->thread_info.smoother)
1263 pa_smoother_free(u->thread_info.smoother);
1264
1265 pa_xfree(u);
1266 }