1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2010 Wim Taymans <wim.taymans@gmail.com>
5
6 Based on module-virtual-sink.c
7 module-virtual-source.c
8 module-loopback.c
9
10 Copyright 2010 Intel Corporation
11 Contributor: Pierre-Louis Bossart <pierre-louis.bossart@intel.com>
12
13 PulseAudio is free software; you can redistribute it and/or modify
14 it under the terms of the GNU Lesser General Public License as published
15 by the Free Software Foundation; either version 2.1 of the License,
16 or (at your option) any later version.
17
18 PulseAudio is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU Lesser General Public License
24 along with PulseAudio; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
26 USA.
27 ***/
28
29 #ifdef HAVE_CONFIG_H
30 #include <config.h>
31 #endif
32
33 #include <stdio.h>
34
35 #include "echo-cancel.h"
36
37 #include <pulse/xmalloc.h>
38 #include <pulse/timeval.h>
39 #include <pulse/rtclock.h>
40
41 #include <pulsecore/i18n.h>
42 #include <pulsecore/atomic.h>
43 #include <pulsecore/macro.h>
44 #include <pulsecore/namereg.h>
45 #include <pulsecore/sink.h>
46 #include <pulsecore/module.h>
47 #include <pulsecore/core-rtclock.h>
48 #include <pulsecore/core-util.h>
49 #include <pulsecore/modargs.h>
50 #include <pulsecore/log.h>
51 #include <pulsecore/rtpoll.h>
52 #include <pulsecore/sample-util.h>
53 #include <pulsecore/ltdl-helper.h>
54
55 #include "module-echo-cancel-symdef.h"
56
57 PA_MODULE_AUTHOR("Wim Taymans");
58 PA_MODULE_DESCRIPTION("Echo Cancellation");
59 PA_MODULE_VERSION(PACKAGE_VERSION);
60 PA_MODULE_LOAD_ONCE(FALSE);
61 PA_MODULE_USAGE(
62 _("source_name=<name for the source> "
63 "source_properties=<properties for the source> "
64 "source_master=<name of source to filter> "
65 "sink_name=<name for the sink> "
66 "sink_properties=<properties for the sink> "
67 "sink_master=<name of sink to filter> "
68 "adjust_time=<how often to readjust rates in s> "
69 "format=<sample format> "
70 "rate=<sample rate> "
71 "channels=<number of channels> "
72 "channel_map=<channel map> "
73 "aec_method=<implementation to use> "
74 "aec_args=<parameters for the AEC engine> "
75 "save_aec=<save AEC data in /tmp> "
76 "autoloaded=<set if this module is being loaded automatically> "
77 "use_volume_sharing=<yes or no> "
78 ));
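/* Example (sketch only; the source_master/sink_master values below are
 * placeholders that depend on the local hardware setup):
 *
 *     load-module module-echo-cancel source_master=<your-capture-source> sink_master=<your-playback-sink> aec_method=speex
 *
 * This creates a filtered source/sink pair on top of the given masters;
 * applications should record from and play to those to get echo-cancelled
 * audio. */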
79
80 /* NOTE: Make sure the enum and ec_table are maintained in the correct order */
81 typedef enum {
82 PA_ECHO_CANCELLER_INVALID = -1,
83 PA_ECHO_CANCELLER_SPEEX = 0,
84 PA_ECHO_CANCELLER_ADRIAN,
85 } pa_echo_canceller_method_t;
86
87 #define DEFAULT_ECHO_CANCELLER "speex"
88
89 static const pa_echo_canceller ec_table[] = {
90 {
91 /* Speex */
92 .init = pa_speex_ec_init,
93 .run = pa_speex_ec_run,
94 .done = pa_speex_ec_done,
95 },
96 {
97 /* Adrian Andre's NLMS implementation */
98 .init = pa_adrian_ec_init,
99 .run = pa_adrian_ec_run,
100 .done = pa_adrian_ec_done,
101 },
102 };
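/* To add another canceller backend (a hypothetical "foo" engine, shown only as
 * a sketch): add PA_ECHO_CANCELLER_FOO to the enum above, append a matching
 * entry here, e.g.
 *
 *     {
 *         .init = pa_foo_ec_init,
 *         .run = pa_foo_ec_run,
 *         .done = pa_foo_ec_done,
 *     },
 *
 * and make get_ec_method_from_string() map "foo" to the new enum value. */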
103
104 #define DEFAULT_RATE 32000
105 #define DEFAULT_CHANNELS 1
106 #define DEFAULT_ADJUST_TIME_USEC (1*PA_USEC_PER_SEC)
107 #define DEFAULT_SAVE_AEC FALSE
108 #define DEFAULT_AUTOLOADED FALSE
109
110 #define MEMBLOCKQ_MAXLENGTH (16*1024*1024)
111
112 /* This module creates a new (virtual) source and sink.
113 *
114 * The data sent to the new sink is kept in a memblockq before being
115 * forwarded to the real sink_master.
116 *
117 * Data read from source_master is matched against the saved sink data and
118 * echo canceled data is then pushed onto the new source.
119 *
120 * Both source and sink masters have their own threads to push/pull data
121 * respectively. However, we perform all our actions in the source IO thread.
122 * To do this we send all played samples to the source IO thread where they
123 * are then pushed into the memblockq.
124 *
125 * Alignment is performed in two steps:
126 *
127 * 1) when something happens that requires quick adjustment of the alignment of
128 * capture and playback samples, we perform a resync. This adjusts the
129 * position in the playback memblock to the requested sample. Quick
130 * adjustments are needed when the playback samples would end up after the
131 * capture samples (the echo canceller cannot work in that case) or when the
132 * playback pointer drifts too far away.
133
134 * 2) periodically check the difference between capture and playback. We use a
135 * low and a high watermark for adjusting the alignment. Playback should always
136 * be ahead of capture and the difference should not be bigger than one frame
137 * size. We would ideally like to resample the sink_input, but most drivers
138 * don't give enough accuracy to be able to do that right now.
139 */
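/* In terms of the code below: step 1 is implemented by do_resync(), which
 * takes a latency snapshot, turns it into a drift estimate with calc_diff()
 * and applies it with apply_diff_time() by scheduling samples to be skipped.
 * Step 2 is implemented by time_callback(), which runs every adjust_time
 * usec and currently only ever triggers the quick adjustment, because the
 * actual rate change is disabled ("assume equal samplerates for now"). */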
140
141 struct snapshot {
142 pa_usec_t sink_now;
143 pa_usec_t sink_latency;
144 size_t sink_delay;
145 int64_t send_counter;
146
147 pa_usec_t source_now;
148 pa_usec_t source_latency;
149 size_t source_delay;
150 int64_t recv_counter;
151 size_t rlen; /* amount of queued recorded data (source side) */
152 size_t plen; /* amount of queued played data (sink side) */
153 };
154
155 struct userdata {
156 pa_core *core;
157 pa_module *module;
158
159 pa_bool_t autoloaded;
160 pa_bool_t dead;
161 pa_bool_t save_aec;
162
163 pa_echo_canceller *ec;
164 uint32_t blocksize; /* bytes per AEC processing block, filled in by ec->init() */
165
166 pa_bool_t need_realign;
167
168 /* to wakeup the source I/O thread */
169 pa_bool_t in_push;
170 pa_asyncmsgq *asyncmsgq;
171 pa_rtpoll_item *rtpoll_item_read, *rtpoll_item_write;
172
173 pa_source *source;
174 pa_bool_t source_auto_desc;
175 pa_source_output *source_output;
176 pa_memblockq *source_memblockq; /* echo canceler needs fixed sized chunks */
177 size_t source_skip;
178
179 pa_sink *sink;
180 pa_bool_t sink_auto_desc;
181 pa_sink_input *sink_input;
182 pa_memblockq *sink_memblockq;
183 int64_t send_counter; /* updated in sink IO thread */
184 int64_t recv_counter;
185 size_t sink_skip;
186
187 pa_atomic_t request_resync;
188
189 int active_mask; /* bit 0: source running, bit 1: sink running; the adjust timer only runs when both are set (== 3) */
190 pa_time_event *time_event;
191 pa_usec_t adjust_time;
192
193 FILE *captured_file;
194 FILE *played_file;
195 FILE *canceled_file;
196 };
197
198 static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot);
199
200 static const char* const valid_modargs[] = {
201 "source_name",
202 "source_properties",
203 "source_master",
204 "sink_name",
205 "sink_properties",
206 "sink_master",
207 "adjust_time",
208 "format",
209 "rate",
210 "channels",
211 "channel_map",
212 "aec_method",
213 "aec_args",
214 "save_aec",
215 "autoloaded",
216 "use_volume_sharing",
217 NULL
218 };
219
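/* Extra messages handled by our source output in the source I/O thread: POST
 * delivers played chunks from the sink input, REWIND adjusts the
 * sink_memblockq write index after a sink-side rewind, and the remaining two
 * are used for latency snapshots and quick realignment. */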
220 enum {
221 SOURCE_OUTPUT_MESSAGE_POST = PA_SOURCE_OUTPUT_MESSAGE_MAX,
222 SOURCE_OUTPUT_MESSAGE_REWIND,
223 SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT,
224 SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME
225 };
226
227 enum {
228 SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT
229 };
230
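/* Estimate the drift between the playback and capture paths from a latency
 * snapshot. The result is in usec: 0 means the streams are perfectly aligned,
 * a negative value means the capture data was recorded before the matching
 * playback data, which the quick resync path has to correct. */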
231 static int64_t calc_diff(struct userdata *u, struct snapshot *snapshot) {
232 int64_t buffer, diff_time, buffer_latency;
233
234 /* get the number of samples between capture and playback */
235 if (snapshot->plen > snapshot->rlen)
236 buffer = snapshot->plen - snapshot->rlen;
237 else
238 buffer = 0;
239
240 buffer += snapshot->source_delay + snapshot->sink_delay;
241
242 /* add the amount of samples not yet transferred to the source context */
243 if (snapshot->recv_counter <= snapshot->send_counter)
244 buffer += (int64_t) (snapshot->send_counter - snapshot->recv_counter);
245 else
246 buffer = PA_CLIP_SUB(buffer, (int64_t) (snapshot->recv_counter - snapshot->send_counter));
247
248 /* convert to time */
249 buffer_latency = pa_bytes_to_usec(buffer, &u->source_output->sample_spec);
250
251 /* capture and playback samples are perfectly aligned when diff_time is 0 */
252 diff_time = (snapshot->sink_now + snapshot->sink_latency - buffer_latency) -
253 (snapshot->source_now - snapshot->source_latency);
254
255 pa_log_debug("diff %lld (%lld - %lld + %lld) %lld %lld %lld %lld", (long long) diff_time,
256 (long long) snapshot->sink_latency,
257 (long long) buffer_latency, (long long) snapshot->source_latency,
258 (long long) snapshot->source_delay, (long long) snapshot->sink_delay,
259 (long long) (snapshot->send_counter - snapshot->recv_counter),
260 (long long) (snapshot->sink_now - snapshot->source_now));
261
262 return diff_time;
263 }
264
265 /* Called from main context */
266 static void time_callback(pa_mainloop_api *a, pa_time_event *e, const struct timeval *t, void *userdata) {
267 struct userdata *u = userdata;
268 uint32_t old_rate, base_rate, new_rate;
269 int64_t diff_time;
270 /*size_t fs*/
271 struct snapshot latency_snapshot;
272
273 pa_assert(u);
274 pa_assert(a);
275 pa_assert(u->time_event == e);
276 pa_assert_ctl_context();
277
278 if (u->active_mask != 3)
279 return;
280
281 /* update our snapshots */
282 pa_asyncmsgq_send(u->source_output->source->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
283 pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
284
285 /* calculate drift between capture and playback */
286 diff_time = calc_diff(u, &latency_snapshot);
287
288 /*fs = pa_frame_size(&u->source_output->sample_spec);*/
289 old_rate = u->sink_input->sample_spec.rate;
290 base_rate = u->source_output->sample_spec.rate;
291
292 if (diff_time < 0) {
293 /* recording before playback, we need to adjust quickly. The echo
294 * canceler does not work in this case. */
295 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
296 NULL, diff_time, NULL, NULL);
297 /*new_rate = base_rate - ((pa_usec_to_bytes(-diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/
298 new_rate = base_rate;
299 }
300 else {
301 if (diff_time > 1000) {
302 /* diff too big, quickly adjust */
303 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
304 NULL, diff_time, NULL, NULL);
305 }
306
307 /* recording behind playback, we need to slowly adjust the rate to match */
308 /*new_rate = base_rate + ((pa_usec_to_bytes(diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/
309
310 /* assume equal samplerates for now */
311 new_rate = base_rate;
312 }
313
314 /* make sure we don't make too big adjustments because that sounds horrible */
315 if (new_rate > base_rate * 1.1 || new_rate < base_rate * 0.9)
316 new_rate = base_rate;
317
318 if (new_rate != old_rate) {
319 pa_log_info("Old rate %lu Hz, new rate %lu Hz", (unsigned long) old_rate, (unsigned long) new_rate);
320
321 pa_sink_input_set_rate(u->sink_input, new_rate);
322 }
323
324 pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
325 }
326
327 /* Called from source I/O thread context */
328 static int source_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
329 struct userdata *u = PA_SOURCE(o)->userdata;
330
331 switch (code) {
332
333 case PA_SOURCE_MESSAGE_GET_LATENCY:
334
335 /* The source is _put() before the source output is, so let's
336 * make sure we don't access it in that time. Also, the
337 * source output is first shut down, the source second. */
338 if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
339 !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state)) {
340 *((pa_usec_t*) data) = 0;
341 return 0;
342 }
343
344 *((pa_usec_t*) data) =
345
346 /* Get the latency of the master source */
347 pa_source_get_latency_within_thread(u->source_output->source) +
348 /* Add the latency internal to our source output on top */
349 pa_bytes_to_usec(pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq), &u->source_output->source->sample_spec) +
350 /* and the buffering we do on the source */
351 pa_bytes_to_usec(u->blocksize, &u->source_output->source->sample_spec);
352
353 return 0;
354
355 }
356
357 return pa_source_process_msg(o, code, data, offset, chunk);
358 }
359
360 /* Called from sink I/O thread context */
361 static int sink_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
362 struct userdata *u = PA_SINK(o)->userdata;
363
364 switch (code) {
365
366 case PA_SINK_MESSAGE_GET_LATENCY:
367
368 /* The sink is _put() before the sink input is, so let's
369 * make sure we don't access it in that time. Also, the
370 * sink input is first shut down, the sink second. */
371 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
372 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) {
373 *((pa_usec_t*) data) = 0;
374 return 0;
375 }
376
377 *((pa_usec_t*) data) =
378
379 /* Get the latency of the master sink */
380 pa_sink_get_latency_within_thread(u->sink_input->sink) +
381
382 /* Add the latency internal to our sink input on top */
383 pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq), &u->sink_input->sink->sample_spec);
384
385 return 0;
386 }
387
388 return pa_sink_process_msg(o, code, data, offset, chunk);
389 }
390
391
392 /* Called from main context */
393 static int source_set_state_cb(pa_source *s, pa_source_state_t state) {
394 struct userdata *u;
395
396 pa_source_assert_ref(s);
397 pa_assert_se(u = s->userdata);
398
399 if (!PA_SOURCE_IS_LINKED(state) ||
400 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
401 return 0;
402
403 pa_log_debug("Source state %d %d", state, u->active_mask);
404
405 if (state == PA_SOURCE_RUNNING) {
406 /* restart timer when both sink and source are active */
407 u->active_mask |= 1;
408 if (u->active_mask == 3)
409 pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
410
411 pa_atomic_store(&u->request_resync, 1);
412 pa_source_output_cork(u->source_output, FALSE);
413 } else if (state == PA_SOURCE_SUSPENDED) {
414 u->active_mask &= ~1;
415 pa_source_output_cork(u->source_output, TRUE);
416 }
417 return 0;
418 }
419
420 /* Called from main context */
421 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t state) {
422 struct userdata *u;
423
424 pa_sink_assert_ref(s);
425 pa_assert_se(u = s->userdata);
426
427 if (!PA_SINK_IS_LINKED(state) ||
428 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
429 return 0;
430
431 pa_log_debug("Sink state %d %d", state, u->active_mask);
432
433 if (state == PA_SINK_RUNNING) {
434 /* restart timer when both sink and source are active */
435 u->active_mask |= 2;
436 if (u->active_mask == 3)
437 pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
438
439 pa_atomic_store(&u->request_resync, 1);
440 pa_sink_input_cork(u->sink_input, FALSE);
441 } else if (state == PA_SINK_SUSPENDED) {
442 u->active_mask &= ~2;
443 pa_sink_input_cork(u->sink_input, TRUE);
444 }
445 return 0;
446 }
447
448 /* Called from I/O thread context */
449 static void source_update_requested_latency_cb(pa_source *s) {
450 struct userdata *u;
451
452 pa_source_assert_ref(s);
453 pa_assert_se(u = s->userdata);
454
455 if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
456 !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state))
457 return;
458
459 pa_log_debug("Source update requested latency");
460
461 /* Just hand this one over to the master source */
462 pa_source_output_set_requested_latency_within_thread(
463 u->source_output,
464 pa_source_get_requested_latency_within_thread(s));
465 }
466
467 /* Called from I/O thread context */
468 static void sink_update_requested_latency_cb(pa_sink *s) {
469 struct userdata *u;
470
471 pa_sink_assert_ref(s);
472 pa_assert_se(u = s->userdata);
473
474 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
475 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
476 return;
477
478 pa_log_debug("Sink update requested latency");
479
480 /* Just hand this one over to the master sink */
481 pa_sink_input_set_requested_latency_within_thread(
482 u->sink_input,
483 pa_sink_get_requested_latency_within_thread(s));
484 }
485
486 /* Called from I/O thread context */
487 static void sink_request_rewind_cb(pa_sink *s) {
488 struct userdata *u;
489
490 pa_sink_assert_ref(s);
491 pa_assert_se(u = s->userdata);
492
493 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
494 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
495 return;
496
497 pa_log_debug("Sink request rewind %lld", (long long) s->thread_info.rewind_nbytes);
498
499 /* Just hand this one over to the master sink */
500 pa_sink_input_request_rewind(u->sink_input,
501 s->thread_info.rewind_nbytes, TRUE, FALSE, FALSE);
502 }
503
504 /* Called from main context */
505 static void source_set_volume_cb(pa_source *s) {
506 struct userdata *u;
507
508 pa_source_assert_ref(s);
509 pa_assert_se(u = s->userdata);
510
511 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
512 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
513 return;
514
515 pa_source_output_set_volume(u->source_output, &s->real_volume, s->save_volume, TRUE);
516 }
517
518 /* Called from main context */
519 static void sink_set_volume_cb(pa_sink *s) {
520 struct userdata *u;
521
522 pa_sink_assert_ref(s);
523 pa_assert_se(u = s->userdata);
524
525 if (!PA_SINK_IS_LINKED(pa_sink_get_state(s)) ||
526 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
527 return;
528
529 pa_sink_input_set_volume(u->sink_input, &s->real_volume, s->save_volume, TRUE);
530 }
531
532 static void source_get_volume_cb(pa_source *s) {
533 struct userdata *u;
534 pa_cvolume v;
535
536 pa_source_assert_ref(s);
537 pa_assert_se(u = s->userdata);
538
539 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
540 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
541 return;
542
543 pa_source_output_get_volume(u->source_output, &v, TRUE);
544
545 if (pa_cvolume_equal(&s->real_volume, &v))
546 /* no change */
547 return;
548
549 s->real_volume = v;
550 pa_source_set_soft_volume(s, NULL);
551 }
552
553 /* Called from main context */
554 static void source_set_mute_cb(pa_source *s) {
555 struct userdata *u;
556
557 pa_source_assert_ref(s);
558 pa_assert_se(u = s->userdata);
559
560 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
561 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
562 return;
563
564 pa_source_output_set_mute(u->source_output, s->muted, s->save_muted);
565 }
566
567 /* Called from main context */
568 static void sink_set_mute_cb(pa_sink *s) {
569 struct userdata *u;
570
571 pa_sink_assert_ref(s);
572 pa_assert_se(u = s->userdata);
573
574 if (!PA_SINK_IS_LINKED(pa_sink_get_state(s)) ||
575 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
576 return;
577
578 pa_sink_input_set_mute(u->sink_input, s->muted, s->save_muted);
579 }
580
581 /* Called from main context */
582 static void source_get_mute_cb(pa_source *s) {
583 struct userdata *u;
584
585 pa_source_assert_ref(s);
586 pa_assert_se(u = s->userdata);
587
588 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
589 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
590 return;
591
592 pa_source_output_get_mute(u->source_output);
593 }
594
595 /* must be called from the input thread context */
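/* Translate a drift estimate from calc_diff() into a number of bytes to drop:
 * a negative diff_time (recording ahead of playback) schedules sink data to
 * be skipped, a positive one schedules source data to be skipped. The actual
 * skipping happens in source_output_push_cb(). */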
596 static void apply_diff_time(struct userdata *u, int64_t diff_time) {
597 int64_t diff;
598
599 if (diff_time < 0) {
600 diff = pa_usec_to_bytes(-diff_time, &u->source_output->sample_spec);
601
602 if (diff > 0) {
603 /* add some extra safety samples to compensate for jitter in the
604 * timings */
605 diff += 10 * pa_frame_size (&u->source_output->sample_spec);
606
607 pa_log("Playback after capture (%lld), drop sink %lld", (long long) diff_time, (long long) diff);
608
609 u->sink_skip = diff;
610 u->source_skip = 0;
611 }
612 } else if (diff_time > 0) {
613 diff = pa_usec_to_bytes(diff_time, &u->source_output->sample_spec);
614
615 if (diff > 0) {
616 pa_log("playback too far ahead (%lld), drop source %lld", (long long) diff_time, (long long) diff);
617
618 u->source_skip = diff;
619 u->sink_skip = 0;
620 }
621 }
622 }
623
624 /* must be called from the input thread */
625 static void do_resync(struct userdata *u) {
626 int64_t diff_time;
627 struct snapshot latency_snapshot;
628
629 pa_log("Doing resync");
630
631 /* update our snapshot */
632 source_output_snapshot_within_thread(u, &latency_snapshot);
633 pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
634
635 /* calculate drift between capture and playback */
636 diff_time = calc_diff(u, &latency_snapshot);
637
638 /* and adjust for the drift */
639 apply_diff_time(u, diff_time);
640 }
641
642 /* Called from input thread context */
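/* Main processing path: handle any messages queued by the sink I/O thread, do
 * a resync if one was requested, then append the new capture data to
 * source_memblockq. As long as at least blocksize bytes of capture data are
 * queued, matching fixed-size blocks of recorded and played samples are run
 * through the canceller and the result is posted to our source, honouring any
 * pending source_skip/sink_skip. If there is not enough played data, the
 * recorded block is passed through unprocessed. */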
643 static void source_output_push_cb(pa_source_output *o, const pa_memchunk *chunk) {
644 struct userdata *u;
645 size_t rlen, plen;
646
647 pa_source_output_assert_ref(o);
648 pa_source_output_assert_io_context(o);
649 pa_assert_se(u = o->userdata);
650
651 if (!PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output))) {
652 pa_log("push when no link?");
653 return;
654 }
655
656 /* handle queued messages */
657 u->in_push = TRUE;
658 while (pa_asyncmsgq_process_one(u->asyncmsgq) > 0)
659 ;
660 u->in_push = FALSE;
661
662 if (pa_atomic_cmpxchg (&u->request_resync, 1, 0)) {
663 do_resync(u);
664 }
665
666 pa_memblockq_push_align(u->source_memblockq, chunk);
667
668 rlen = pa_memblockq_get_length(u->source_memblockq);
669 plen = pa_memblockq_get_length(u->sink_memblockq);
670
671 while (rlen >= u->blocksize) {
672 pa_memchunk rchunk, pchunk;
673
674 /* take fixed block from recorded samples */
675 pa_memblockq_peek_fixed_size(u->source_memblockq, u->blocksize, &rchunk);
676
677 if (plen > u->blocksize && u->source_skip == 0) {
678 uint8_t *rdata, *pdata, *cdata;
679 pa_memchunk cchunk;
680 int unused;
681
682 if (u->sink_skip) {
683 size_t to_skip;
684
685 if (u->sink_skip > plen)
686 to_skip = plen;
687 else
688 to_skip = u->sink_skip;
689
690 pa_memblockq_drop(u->sink_memblockq, to_skip);
691 plen -= to_skip;
692
693 u->sink_skip -= to_skip;
694 }
695
696 if (plen > u->blocksize && u->sink_skip == 0) {
697 /* take fixed block from played samples */
698 pa_memblockq_peek_fixed_size(u->sink_memblockq, u->blocksize, &pchunk);
699
700 rdata = pa_memblock_acquire(rchunk.memblock);
701 rdata += rchunk.index;
702 pdata = pa_memblock_acquire(pchunk.memblock);
703 pdata += pchunk.index;
704
705 cchunk.index = 0;
706 cchunk.length = u->blocksize;
707 cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length);
708 cdata = pa_memblock_acquire(cchunk.memblock);
709
710 if (u->save_aec) {
711 if (u->captured_file)
712 unused = fwrite(rdata, 1, u->blocksize, u->captured_file);
713 if (u->played_file)
714 unused = fwrite(pdata, 1, u->blocksize, u->played_file);
715 }
716
717 /* perform echo cancellation */
718 u->ec->run(u->ec, rdata, pdata, cdata);
719
720 if (u->save_aec) {
721 if (u->canceled_file)
722 unused = fwrite(cdata, 1, u->blocksize, u->canceled_file);
723 }
724
725 pa_memblock_release(cchunk.memblock);
726 pa_memblock_release(pchunk.memblock);
727 pa_memblock_release(rchunk.memblock);
728
729 /* drop consumed sink samples */
730 pa_memblockq_drop(u->sink_memblockq, u->blocksize);
731 pa_memblock_unref(pchunk.memblock);
732
733 pa_memblock_unref(rchunk.memblock);
734 /* the filtered samples now become the samples from our
735 * source */
736 rchunk = cchunk;
737
738 plen -= u->blocksize;
739 }
740 }
741
742 /* forward the (echo-canceled) data to the virtual source */
743 pa_source_post(u->source, &rchunk);
744 pa_memblock_unref(rchunk.memblock);
745
746 pa_memblockq_drop(u->source_memblockq, u->blocksize);
747 rlen -= u->blocksize;
748
749 if (u->source_skip) {
750 if (u->source_skip > u->blocksize) {
751 u->source_skip -= u->blocksize;
752 }
753 else {
754 u->sink_skip += (u->blocksize - u->source_skip);
755 u->source_skip = 0;
756 }
757 }
758 }
759 }
760
761 /* Called from I/O thread context */
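/* Render audio for the master sink from our virtual sink and ship a copy of
 * every rendered chunk to the source I/O thread (SOURCE_OUTPUT_MESSAGE_POST),
 * where it is queued in sink_memblockq for the canceller; send_counter tracks
 * how much has been shipped. */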
762 static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
763 struct userdata *u;
764
765 pa_sink_input_assert_ref(i);
766 pa_assert(chunk);
767 pa_assert_se(u = i->userdata);
768
769 if (u->sink->thread_info.rewind_requested)
770 pa_sink_process_rewind(u->sink, 0);
771
772 pa_sink_render_full(u->sink, nbytes, chunk);
773
774 if (i->thread_info.underrun_for > 0) {
775 pa_log_debug("Handling end of underrun.");
776 pa_atomic_store(&u->request_resync, 1);
777 }
778
779 /* let source thread handle the chunk. pass the sample count as well so that
780 * the source IO thread can update the right variables. */
781 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_POST,
782 NULL, 0, chunk, NULL);
783 u->send_counter += chunk->length;
784
785 return 0;
786 }
787
788 /* Called from input thread context */
789 static void source_output_process_rewind_cb(pa_source_output *o, size_t nbytes) {
790 struct userdata *u;
791
792 pa_source_output_assert_ref(o);
793 pa_source_output_assert_io_context(o);
794 pa_assert_se(u = o->userdata);
795
796 pa_source_process_rewind(u->source, nbytes);
797
798 /* go back on read side, we need to use older sink data for this */
799 pa_memblockq_rewind(u->sink_memblockq, nbytes);
800
801 /* manipulate write index */
802 pa_memblockq_seek(u->source_memblockq, -nbytes, PA_SEEK_RELATIVE, TRUE);
803
804 pa_log_debug("Source rewind (%lld) %lld", (long long) nbytes,
805 (long long) pa_memblockq_get_length (u->source_memblockq));
806 }
807
808 /* Called from I/O thread context */
809 static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
810 struct userdata *u;
811
812 pa_sink_input_assert_ref(i);
813 pa_assert_se(u = i->userdata);
814
815 pa_log_debug("Sink process rewind %lld", (long long) nbytes);
816
817 pa_sink_process_rewind(u->sink, nbytes);
818
819 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_REWIND, NULL, (int64_t) nbytes, NULL, NULL);
820 u->send_counter -= nbytes;
821 }
822
823 static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot) {
824 size_t delay, rlen, plen;
825 pa_usec_t now, latency;
826
827 now = pa_rtclock_now();
828 latency = pa_source_get_latency_within_thread(u->source_output->source);
829 delay = pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq);
830
831 delay = (u->source_output->thread_info.resampler ? pa_resampler_request(u->source_output->thread_info.resampler, delay) : delay);
832 rlen = pa_memblockq_get_length(u->source_memblockq);
833 plen = pa_memblockq_get_length(u->sink_memblockq);
834
835 snapshot->source_now = now;
836 snapshot->source_latency = latency;
837 snapshot->source_delay = delay;
838 snapshot->recv_counter = u->recv_counter;
839 snapshot->rlen = rlen + u->sink_skip;
840 snapshot->plen = plen + u->source_skip;
841 }
842
843
844 /* Called from output thread context */
845 static int source_output_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
846 struct userdata *u = PA_SOURCE_OUTPUT(obj)->userdata;
847
848 switch (code) {
849
850 case SOURCE_OUTPUT_MESSAGE_POST:
851
852 pa_source_output_assert_io_context(u->source_output);
853
854 if (PA_SOURCE_IS_OPENED(u->source_output->source->thread_info.state))
855 pa_memblockq_push_align(u->sink_memblockq, chunk);
856 else
857 pa_memblockq_flush_write(u->sink_memblockq, TRUE);
858
859 u->recv_counter += (int64_t) chunk->length;
860
861 return 0;
862
863 case SOURCE_OUTPUT_MESSAGE_REWIND:
864 pa_source_output_assert_io_context(u->source_output);
865
866 /* manipulate write index, never go past what we have */
867 if (PA_SOURCE_IS_OPENED(u->source_output->source->thread_info.state))
868 pa_memblockq_seek(u->sink_memblockq, -offset, PA_SEEK_RELATIVE, TRUE);
869 else
870 pa_memblockq_flush_write(u->sink_memblockq, TRUE);
871
872 pa_log_debug("Sink rewind (%lld)", (long long) offset);
873
874 u->recv_counter -= offset;
875
876 return 0;
877
878 case SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT: {
879 struct snapshot *snapshot = (struct snapshot *) data;
880
881 source_output_snapshot_within_thread(u, snapshot);
882 return 0;
883 }
884
885 case SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME:
886 apply_diff_time(u, offset);
887 return 0;
888
889 }
890
891 return pa_source_output_process_msg(obj, code, data, offset, chunk);
892 }
893
894 static int sink_input_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
895 struct userdata *u = PA_SINK_INPUT(obj)->userdata;
896
897 switch (code) {
898
899 case SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT: {
900 size_t delay;
901 pa_usec_t now, latency;
902 struct snapshot *snapshot = (struct snapshot *) data;
903
904 pa_sink_input_assert_io_context(u->sink_input);
905
906 now = pa_rtclock_now();
907 latency = pa_sink_get_latency_within_thread(u->sink_input->sink);
908 delay = pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq);
909
910 delay = (u->sink_input->thread_info.resampler ? pa_resampler_request(u->sink_input->thread_info.resampler, delay) : delay);
911
912 snapshot->sink_now = now;
913 snapshot->sink_latency = latency;
914 snapshot->sink_delay = delay;
915 snapshot->send_counter = u->send_counter;
916 return 0;
917 }
918 }
919
920 return pa_sink_input_process_msg(obj, code, data, offset, chunk);
921 }
922
923 /* Called from I/O thread context */
924 static void sink_input_update_max_rewind_cb(pa_sink_input *i, size_t nbytes) {
925 struct userdata *u;
926
927 pa_sink_input_assert_ref(i);
928 pa_assert_se(u = i->userdata);
929
930 pa_log_debug("Sink input update max rewind %lld", (long long) nbytes);
931
932 pa_memblockq_set_maxrewind(u->sink_memblockq, nbytes);
933 pa_sink_set_max_rewind_within_thread(u->sink, nbytes);
934 }
935
936 /* Called from I/O thread context */
937 static void source_output_update_max_rewind_cb(pa_source_output *o, size_t nbytes) {
938 struct userdata *u;
939
940 pa_source_output_assert_ref(o);
941 pa_assert_se(u = o->userdata);
942
943 pa_log_debug("Source output update max rewind %lld", (long long) nbytes);
944
945 pa_source_set_max_rewind_within_thread(u->source, nbytes);
946 }
947
948 /* Called from I/O thread context */
949 static void sink_input_update_max_request_cb(pa_sink_input *i, size_t nbytes) {
950 struct userdata *u;
951
952 pa_sink_input_assert_ref(i);
953 pa_assert_se(u = i->userdata);
954
955 pa_log_debug("Sink input update max request %lld", (long long) nbytes);
956
957 pa_sink_set_max_request_within_thread(u->sink, nbytes);
958 }
959
960 /* Called from I/O thread context */
961 static void sink_input_update_sink_requested_latency_cb(pa_sink_input *i) {
962 struct userdata *u;
963 pa_usec_t latency;
964
965 pa_sink_input_assert_ref(i);
966 pa_assert_se(u = i->userdata);
967
968 latency = pa_sink_get_requested_latency_within_thread(i->sink);
969
970 pa_log_debug("Sink input update requested latency %lld", (long long) latency);
971 }
972
973 /* Called from I/O thread context */
974 static void source_output_update_source_requested_latency_cb(pa_source_output *o) {
975 struct userdata *u;
976 pa_usec_t latency;
977
978 pa_source_output_assert_ref(o);
979 pa_assert_se(u = o->userdata);
980
981 latency = pa_source_get_requested_latency_within_thread(o->source);
982
983 pa_log_debug("source output update requested latency %lld", (long long) latency);
984 }
985
986 /* Called from I/O thread context */
987 static void sink_input_update_sink_latency_range_cb(pa_sink_input *i) {
988 struct userdata *u;
989
990 pa_sink_input_assert_ref(i);
991 pa_assert_se(u = i->userdata);
992
993 pa_log_debug("Sink input update latency range %lld %lld",
994 (long long) i->sink->thread_info.min_latency,
995 (long long) i->sink->thread_info.max_latency);
996
997 pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);
998 }
999
1000 /* Called from I/O thread context */
1001 static void source_output_update_source_latency_range_cb(pa_source_output *o) {
1002 struct userdata *u;
1003
1004 pa_source_output_assert_ref(o);
1005 pa_assert_se(u = o->userdata);
1006
1007 pa_log_debug("Source output update latency range %lld %lld",
1008 (long long) o->source->thread_info.min_latency,
1009 (long long) o->source->thread_info.max_latency);
1010
1011 pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
1012 }
1013
1014 /* Called from I/O thread context */
1015 static void sink_input_update_sink_fixed_latency_cb(pa_sink_input *i) {
1016 struct userdata *u;
1017
1018 pa_sink_input_assert_ref(i);
1019 pa_assert_se(u = i->userdata);
1020
1021 pa_log_debug("Sink input update fixed latency %lld",
1022 (long long) i->sink->thread_info.fixed_latency);
1023
1024 pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);
1025 }
1026
1027 /* Called from I/O thread context */
1028 static void source_output_update_source_fixed_latency_cb(pa_source_output *o) {
1029 struct userdata *u;
1030
1031 pa_source_output_assert_ref(o);
1032 pa_assert_se(u = o->userdata);
1033
1034 pa_log_debug("Source output update fixed latency %lld",
1035 (long long) o->source->thread_info.fixed_latency);
1036
1037 pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
1038 }
1039
1040 /* Called from output thread context */
1041 static void source_output_attach_cb(pa_source_output *o) {
1042 struct userdata *u;
1043
1044 pa_source_output_assert_ref(o);
1045 pa_source_output_assert_io_context(o);
1046 pa_assert_se(u = o->userdata);
1047
1048 pa_source_set_rtpoll(u->source, o->source->thread_info.rtpoll);
1049 pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
1050 pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
1051 pa_source_set_max_rewind_within_thread(u->source, pa_source_output_get_max_rewind(o));
1052
1053 pa_log_debug("Source output %d attach", o->index);
1054
1055 pa_source_attach_within_thread(u->source);
1056
1057 u->rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
1058 o->source->thread_info.rtpoll,
1059 PA_RTPOLL_LATE,
1060 u->asyncmsgq);
1061 }
1062
1063 /* Called from I/O thread context */
1064 static void sink_input_attach_cb(pa_sink_input *i) {
1065 struct userdata *u;
1066
1067 pa_sink_input_assert_ref(i);
1068 pa_assert_se(u = i->userdata);
1069
1070 pa_sink_set_rtpoll(u->sink, i->sink->thread_info.rtpoll);
1071 pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);
1072
1073 /* (8.1) IF YOU NEED A FIXED BLOCK SIZE ADD THE LATENCY FOR ONE
1074 * BLOCK MINUS ONE SAMPLE HERE. SEE (7) */
1075 pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);
1076
1077 /* (8.2) IF YOU NEED A FIXED BLOCK SIZE ROUND
1078 * pa_sink_input_get_max_request(i) UP TO MULTIPLES OF IT
1079 * HERE. SEE (6) */
1080 pa_sink_set_max_request_within_thread(u->sink, pa_sink_input_get_max_request(i));
1081 pa_sink_set_max_rewind_within_thread(u->sink, pa_sink_input_get_max_rewind(i));
1082
1083 pa_log_debug("Sink input %d attach", i->index);
1084
1085 u->rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
1086 i->sink->thread_info.rtpoll,
1087 PA_RTPOLL_LATE,
1088 u->asyncmsgq);
1089
1090 pa_sink_attach_within_thread(u->sink);
1091 }
1092
1093
1094 /* Called from output thread context */
1095 static void source_output_detach_cb(pa_source_output *o) {
1096 struct userdata *u;
1097
1098 pa_source_output_assert_ref(o);
1099 pa_source_output_assert_io_context(o);
1100 pa_assert_se(u = o->userdata);
1101
1102 pa_source_detach_within_thread(u->source);
1103 pa_source_set_rtpoll(u->source, NULL);
1104
1105 pa_log_debug("Source output %d detach", o->index);
1106
1107 if (u->rtpoll_item_read) {
1108 pa_rtpoll_item_free(u->rtpoll_item_read);
1109 u->rtpoll_item_read = NULL;
1110 }
1111 }
1112
1113 /* Called from I/O thread context */
1114 static void sink_input_detach_cb(pa_sink_input *i) {
1115 struct userdata *u;
1116
1117 pa_sink_input_assert_ref(i);
1118 pa_assert_se(u = i->userdata);
1119
1120 pa_sink_detach_within_thread(u->sink);
1121
1122 pa_sink_set_rtpoll(u->sink, NULL);
1123
1124 pa_log_debug("Sink input %d detach", i->index);
1125
1126 if (u->rtpoll_item_write) {
1127 pa_rtpoll_item_free(u->rtpoll_item_write);
1128 u->rtpoll_item_write = NULL;
1129 }
1130 }
1131
1132 /* Called from output thread context */
1133 static void source_output_state_change_cb(pa_source_output *o, pa_source_output_state_t state) {
1134 struct userdata *u;
1135
1136 pa_source_output_assert_ref(o);
1137 pa_source_output_assert_io_context(o);
1138 pa_assert_se(u = o->userdata);
1139
1140 pa_log_debug("Source output %d state %d", o->index, state);
1141 }
1142
1143 /* Called from IO thread context */
1144 static void sink_input_state_change_cb(pa_sink_input *i, pa_sink_input_state_t state) {
1145 struct userdata *u;
1146
1147 pa_sink_input_assert_ref(i);
1148 pa_assert_se(u = i->userdata);
1149
1150 pa_log_debug("Sink input %d state %d", i->index, state);
1151
1152 /* If we are added for the first time, ask for a rewinding so that
1153 * we are heard right-away. */
1154 if (PA_SINK_INPUT_IS_LINKED(state) &&
1155 i->thread_info.state == PA_SINK_INPUT_INIT) {
1156 pa_log_debug("Requesting rewind due to state change.");
1157 pa_sink_input_request_rewind(i, 0, FALSE, TRUE, TRUE);
1158 }
1159 }
1160
1161 /* Called from main thread */
1162 static void source_output_kill_cb(pa_source_output *o) {
1163 struct userdata *u;
1164
1165 pa_source_output_assert_ref(o);
1166 pa_assert_ctl_context();
1167 pa_assert_se(u = o->userdata);
1168
1169 u->dead = TRUE;
1170
1171 /* The order here matters! We first kill the source output, followed
1172 * by the source. That means the source callbacks must be protected
1173 * against an unconnected source output! */
1174 pa_source_output_unlink(u->source_output);
1175 pa_source_unlink(u->source);
1176
1177 pa_source_output_unref(u->source_output);
1178 u->source_output = NULL;
1179
1180 pa_source_unref(u->source);
1181 u->source = NULL;
1182
1183 pa_log_debug("Source output kill %d", o->index);
1184
1185 pa_module_unload_request(u->module, TRUE);
1186 }
1187
1188 /* Called from main context */
1189 static void sink_input_kill_cb(pa_sink_input *i) {
1190 struct userdata *u;
1191
1192 pa_sink_input_assert_ref(i);
1193 pa_assert_se(u = i->userdata);
1194
1195 u->dead = TRUE;
1196
1197 /* The order here matters! We first kill the sink input, followed
1198 * by the sink. That means the sink callbacks must be protected
1199 * against an unconnected sink input! */
1200 pa_sink_input_unlink(u->sink_input);
1201 pa_sink_unlink(u->sink);
1202
1203 pa_sink_input_unref(u->sink_input);
1204 u->sink_input = NULL;
1205
1206 pa_sink_unref(u->sink);
1207 u->sink = NULL;
1208
1209 pa_log_debug("Sink input kill %d", i->index);
1210
1211 pa_module_unload_request(u->module, TRUE);
1212 }
1213
1214 /* Called from main thread */
1215 static pa_bool_t source_output_may_move_to_cb(pa_source_output *o, pa_source *dest) {
1216 struct userdata *u;
1217
1218 pa_source_output_assert_ref(o);
1219 pa_assert_ctl_context();
1220 pa_assert_se(u = o->userdata);
1221
1222 if (u->dead)
1223 return FALSE;
1224
1225 return (u->source != dest) && (u->sink != dest->monitor_of);
1226 }
1227
1228 /* Called from main context */
1229 static pa_bool_t sink_input_may_move_to_cb(pa_sink_input *i, pa_sink *dest) {
1230 struct userdata *u;
1231
1232 pa_sink_input_assert_ref(i);
1233 pa_assert_se(u = i->userdata);
1234
1235 if (u->dead)
1236 return FALSE;
1237
1238 return u->sink != dest;
1239 }
1240
1241 /* Called from main thread */
1242 static void source_output_moving_cb(pa_source_output *o, pa_source *dest) {
1243 struct userdata *u;
1244
1245 pa_source_output_assert_ref(o);
1246 pa_assert_ctl_context();
1247 pa_assert_se(u = o->userdata);
1248
1249 if (dest) {
1250 pa_source_set_asyncmsgq(u->source, dest->asyncmsgq);
1251 pa_source_update_flags(u->source, PA_SOURCE_LATENCY|PA_SOURCE_DYNAMIC_LATENCY, dest->flags);
1252 } else
1253 pa_source_set_asyncmsgq(u->source, NULL);
1254
1255 if (u->source_auto_desc && dest) {
1256 const char *z;
1257 pa_proplist *pl;
1258
1259 pl = pa_proplist_new();
1260 z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
1261 pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "Echo-Cancel Source %s on %s",
1262 pa_proplist_gets(u->source->proplist, "device.echo-cancel.name"), z ? z : dest->name);
1263
1264 pa_source_update_proplist(u->source, PA_UPDATE_REPLACE, pl);
1265 pa_proplist_free(pl);
1266 }
1267 }
1268
1269 /* Called from main context */
1270 static void sink_input_moving_cb(pa_sink_input *i, pa_sink *dest) {
1271 struct userdata *u;
1272
1273 pa_sink_input_assert_ref(i);
1274 pa_assert_se(u = i->userdata);
1275
1276 if (dest) {
1277 pa_sink_set_asyncmsgq(u->sink, dest->asyncmsgq);
1278 pa_sink_update_flags(u->sink, PA_SINK_LATENCY|PA_SINK_DYNAMIC_LATENCY, dest->flags);
1279 } else
1280 pa_sink_set_asyncmsgq(u->sink, NULL);
1281
1282 if (u->sink_auto_desc && dest) {
1283 const char *z;
1284 pa_proplist *pl;
1285
1286 pl = pa_proplist_new();
1287 z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
1288 pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "Echo-Cancel Sink %s on %s",
1289 pa_proplist_gets(u->sink->proplist, "device.echo-cancel.name"), z ? z : dest->name);
1290
1291 pa_sink_update_proplist(u->sink, PA_UPDATE_REPLACE, pl);
1292 pa_proplist_free(pl);
1293 }
1294 }
1295
1296 /* Called from main context */
1297 static void sink_input_volume_changed_cb(pa_sink_input *i) {
1298 struct userdata *u;
1299
1300 pa_sink_input_assert_ref(i);
1301 pa_assert_se(u = i->userdata);
1302
1303 pa_sink_volume_changed(u->sink, &i->volume);
1304 }
1305
1306 /* Called from main context */
1307 static void sink_input_mute_changed_cb(pa_sink_input *i) {
1308 struct userdata *u;
1309
1310 pa_sink_input_assert_ref(i);
1311 pa_assert_se(u = i->userdata);
1312
1313 pa_sink_mute_changed(u->sink, i->muted);
1314 }
1315
1316 static pa_echo_canceller_method_t get_ec_method_from_string(const char *method) {
1317 if (pa_streq(method, "speex"))
1318 return PA_ECHO_CANCELLER_SPEEX;
1319 else if (pa_streq(method, "adrian"))
1320 return PA_ECHO_CANCELLER_ADRIAN;
1321 else
1322 return PA_ECHO_CANCELLER_INVALID;
1323 }
1324
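/* Module entry point: parse the arguments, pick and initialize the AEC
 * implementation, create the virtual source and sink on top of the given
 * masters, hook up the source output and sink input that shuttle the audio,
 * create the two memblockqs and, if configured, the rate-adjustment timer and
 * the /tmp debug files. */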
1325 int pa__init(pa_module*m) {
1326 struct userdata *u;
1327 pa_sample_spec source_ss, sink_ss;
1328 pa_channel_map source_map, sink_map;
1329 pa_modargs *ma;
1330 pa_source *source_master=NULL;
1331 pa_sink *sink_master=NULL;
1332 pa_source_output_new_data source_output_data;
1333 pa_sink_input_new_data sink_input_data;
1334 pa_source_new_data source_data;
1335 pa_sink_new_data sink_data;
1336 pa_memchunk silence;
1337 pa_echo_canceller_method_t ec_method;
1338 uint32_t adjust_time_sec;
1339 pa_bool_t use_volume_sharing = TRUE;
1340
1341 pa_assert(m);
1342
1343 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1344 pa_log("Failed to parse module arguments.");
1345 goto fail;
1346 }
1347
1348 if (!(source_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "source_master", NULL), PA_NAMEREG_SOURCE))) {
1349 pa_log("Master source not found");
1350 goto fail;
1351 }
1352 pa_assert(source_master);
1353
1354 if (!(sink_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "sink_master", NULL), PA_NAMEREG_SINK))) {
1355 pa_log("Master sink not found");
1356 goto fail;
1357 }
1358 pa_assert(sink_master);
1359
1360 source_ss = source_master->sample_spec;
1361 source_ss.rate = DEFAULT_RATE;
1362 source_ss.channels = DEFAULT_CHANNELS;
1363 pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT);
1364 if (pa_modargs_get_sample_spec_and_channel_map(ma, &source_ss, &source_map, PA_CHANNEL_MAP_DEFAULT) < 0) {
1365 pa_log("Invalid sample format specification or channel map");
1366 goto fail;
1367 }
1368
1369 sink_ss = sink_master->sample_spec;
1370 sink_map = sink_master->channel_map;
1371
1372 if (pa_modargs_get_value_boolean(ma, "use_volume_sharing", &use_volume_sharing) < 0) {
1373 pa_log("use_volume_sharing= expects a boolean argument");
1374 goto fail;
1375 }
1376
1377 u = pa_xnew0(struct userdata, 1);
1378 if (!u) {
1379 pa_log("Failed to alloc userdata");
1380 goto fail;
1381 }
1382 u->core = m->core;
1383 u->module = m;
1384 m->userdata = u;
1385 u->dead = FALSE;
1386
1387 u->ec = pa_xnew0(pa_echo_canceller, 1);
1388 if (!u->ec) {
1389 pa_log("Failed to alloc echo canceller");
1390 goto fail;
1391 }
1392
1393 if ((ec_method = get_ec_method_from_string(pa_modargs_get_value(ma, "aec_method", DEFAULT_ECHO_CANCELLER))) < 0) {
1394 pa_log("Invalid echo canceller implementation");
1395 goto fail;
1396 }
1397
1398 u->ec->init = ec_table[ec_method].init;
1399 u->ec->run = ec_table[ec_method].run;
1400 u->ec->done = ec_table[ec_method].done;
1401
1402 adjust_time_sec = DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC;
1403 if (pa_modargs_get_value_u32(ma, "adjust_time", &adjust_time_sec) < 0) {
1404 pa_log("Failed to parse adjust_time value");
1405 goto fail;
1406 }
1407
1408 if (adjust_time_sec != DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC)
1409 u->adjust_time = adjust_time_sec * PA_USEC_PER_SEC;
1410 else
1411 u->adjust_time = DEFAULT_ADJUST_TIME_USEC;
1412
1413 u->save_aec = DEFAULT_SAVE_AEC;
1414 if (pa_modargs_get_value_boolean(ma, "save_aec", &u->save_aec) < 0) {
1415 pa_log("Failed to parse save_aec value");
1416 goto fail;
1417 }
1418
1419 u->autoloaded = DEFAULT_AUTOLOADED;
1420 if (pa_modargs_get_value_boolean(ma, "autoloaded", &u->autoloaded) < 0) {
1421 pa_log("Failed to parse autoloaded value");
1422 goto fail;
1423 }
1424
1425 u->asyncmsgq = pa_asyncmsgq_new(0);
1426 u->need_realign = TRUE;
1427 if (u->ec->init) {
1428 if (!u->ec->init(u->core, u->ec, &source_ss, &source_map, &sink_ss, &sink_map, &u->blocksize, pa_modargs_get_value(ma, "aec_args", NULL))) {
1429 pa_log("Failed to init AEC engine");
1430 goto fail;
1431 }
1432 }
1433
1434 /* Create source */
1435 pa_source_new_data_init(&source_data);
1436 source_data.driver = __FILE__;
1437 source_data.module = m;
1438 if (!(source_data.name = pa_xstrdup(pa_modargs_get_value(ma, "source_name", NULL))))
1439 source_data.name = pa_sprintf_malloc("%s.echo-cancel", source_master->name);
1440 pa_source_new_data_set_sample_spec(&source_data, &source_ss);
1441 pa_source_new_data_set_channel_map(&source_data, &source_map);
1442 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, source_master->name);
1443 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1444 if (!u->autoloaded)
1445 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
1446 pa_proplist_sets(source_data.proplist, "device.echo-cancel.name", source_data.name);
1447
1448 if (pa_modargs_get_proplist(ma, "source_properties", source_data.proplist, PA_UPDATE_REPLACE) < 0) {
1449 pa_log("Invalid properties");
1450 pa_source_new_data_done(&source_data);
1451 goto fail;
1452 }
1453
1454 if ((u->source_auto_desc = !pa_proplist_contains(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
1455 const char *z;
1456
1457 z = pa_proplist_gets(source_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1458 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Echo-Cancel Source %s on %s", source_data.name, z ? z : source_master->name);
1459 }
1460
1461 u->source = pa_source_new(m->core, &source_data, (source_master->flags & (PA_SOURCE_LATENCY | PA_SOURCE_DYNAMIC_LATENCY))
1462 | (use_volume_sharing ? PA_SOURCE_SHARE_VOLUME_WITH_MASTER : 0));
1463 pa_source_new_data_done(&source_data);
1464
1465 if (!u->source) {
1466 pa_log("Failed to create source.");
1467 goto fail;
1468 }
1469
1470 u->source->parent.process_msg = source_process_msg_cb;
1471 u->source->set_state = source_set_state_cb;
1472 u->source->update_requested_latency = source_update_requested_latency_cb;
1473 pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1474 pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1475 if (!use_volume_sharing) {
1476 pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1477 pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1478 pa_source_enable_decibel_volume(u->source, TRUE);
1479 }
1480 u->source->userdata = u;
1481
1482 pa_source_set_asyncmsgq(u->source, source_master->asyncmsgq);
1483
1484 /* Create sink */
1485 pa_sink_new_data_init(&sink_data);
1486 sink_data.driver = __FILE__;
1487 sink_data.module = m;
1488 if (!(sink_data.name = pa_xstrdup(pa_modargs_get_value(ma, "sink_name", NULL))))
1489 sink_data.name = pa_sprintf_malloc("%s.echo-cancel", sink_master->name);
1490 pa_sink_new_data_set_sample_spec(&sink_data, &sink_ss);
1491 pa_sink_new_data_set_channel_map(&sink_data, &sink_map);
1492 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, sink_master->name);
1493 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1494 if (!u->autoloaded)
1495 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
1496 pa_proplist_sets(sink_data.proplist, "device.echo-cancel.name", sink_data.name);
1497
1498 if (pa_modargs_get_proplist(ma, "sink_properties", sink_data.proplist, PA_UPDATE_REPLACE) < 0) {
1499 pa_log("Invalid properties");
1500 pa_sink_new_data_done(&sink_data);
1501 goto fail;
1502 }
1503
1504 if ((u->sink_auto_desc = !pa_proplist_contains(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
1505 const char *z;
1506
1507 z = pa_proplist_gets(sink_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1508 pa_proplist_setf(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Echo-Cancel Sink %s on %s", sink_data.name, z ? z : sink_master->name);
1509 }
1510
1511 u->sink = pa_sink_new(m->core, &sink_data, (sink_master->flags & (PA_SINK_LATENCY | PA_SINK_DYNAMIC_LATENCY))
1512 | (use_volume_sharing ? PA_SINK_SHARE_VOLUME_WITH_MASTER : 0));
1513 pa_sink_new_data_done(&sink_data);
1514
1515 if (!u->sink) {
1516 pa_log("Failed to create sink.");
1517 goto fail;
1518 }
1519
1520 u->sink->parent.process_msg = sink_process_msg_cb;
1521 u->sink->set_state = sink_set_state_cb;
1522 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1523 u->sink->request_rewind = sink_request_rewind_cb;
1524 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1525 if (!use_volume_sharing) {
1526 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1527 pa_sink_enable_decibel_volume(u->sink, TRUE);
1528 }
1529 u->sink->userdata = u;
1530
1531 pa_sink_set_asyncmsgq(u->sink, sink_master->asyncmsgq);
1532
1533 /* Create source output */
1534 pa_source_output_new_data_init(&source_output_data);
1535 source_output_data.driver = __FILE__;
1536 source_output_data.module = m;
1537 pa_source_output_new_data_set_source(&source_output_data, source_master, FALSE);
1538 source_output_data.destination_source = u->source;
1539 /* FIXME
1540 source_output_data.flags = PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND; */
1541
1542 pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Source Stream");
1543 pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
1544 pa_source_output_new_data_set_sample_spec(&source_output_data, &source_ss);
1545 pa_source_output_new_data_set_channel_map(&source_output_data, &source_map);
1546
1547 pa_source_output_new(&u->source_output, m->core, &source_output_data);
1548 pa_source_output_new_data_done(&source_output_data);
1549
1550 if (!u->source_output)
1551 goto fail;
1552
1553 u->source_output->parent.process_msg = source_output_process_msg_cb;
1554 u->source_output->push = source_output_push_cb;
1555 u->source_output->process_rewind = source_output_process_rewind_cb;
1556 u->source_output->update_max_rewind = source_output_update_max_rewind_cb;
1557 u->source_output->update_source_requested_latency = source_output_update_source_requested_latency_cb;
1558 u->source_output->update_source_latency_range = source_output_update_source_latency_range_cb;
1559 u->source_output->update_source_fixed_latency = source_output_update_source_fixed_latency_cb;
1560 u->source_output->kill = source_output_kill_cb;
1561 u->source_output->attach = source_output_attach_cb;
1562 u->source_output->detach = source_output_detach_cb;
1563 u->source_output->state_change = source_output_state_change_cb;
1564 u->source_output->may_move_to = source_output_may_move_to_cb;
1565 u->source_output->moving = source_output_moving_cb;
1566 u->source_output->userdata = u;
1567
1568 u->source->output_from_master = u->source_output;
1569
1570 /* Create sink input */
1571 pa_sink_input_new_data_init(&sink_input_data);
1572 sink_input_data.driver = __FILE__;
1573 sink_input_data.module = m;
1574 pa_sink_input_new_data_set_sink(&sink_input_data, sink_master, FALSE);
1575 sink_input_data.origin_sink = u->sink;
1576 pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Sink Stream");
1577 pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
1578 pa_sink_input_new_data_set_sample_spec(&sink_input_data, &sink_ss);
1579 pa_sink_input_new_data_set_channel_map(&sink_input_data, &sink_map);
1580 sink_input_data.flags = PA_SINK_INPUT_VARIABLE_RATE;
1581
1582 pa_sink_input_new(&u->sink_input, m->core, &sink_input_data);
1583 pa_sink_input_new_data_done(&sink_input_data);
1584
1585 if (!u->sink_input)
1586 goto fail;
1587
1588 u->sink_input->parent.process_msg = sink_input_process_msg_cb;
1589 u->sink_input->pop = sink_input_pop_cb;
1590 u->sink_input->process_rewind = sink_input_process_rewind_cb;
1591 u->sink_input->update_max_rewind = sink_input_update_max_rewind_cb;
1592 u->sink_input->update_max_request = sink_input_update_max_request_cb;
1593 u->sink_input->update_sink_requested_latency = sink_input_update_sink_requested_latency_cb;
1594 u->sink_input->update_sink_latency_range = sink_input_update_sink_latency_range_cb;
1595 u->sink_input->update_sink_fixed_latency = sink_input_update_sink_fixed_latency_cb;
1596 u->sink_input->kill = sink_input_kill_cb;
1597 u->sink_input->attach = sink_input_attach_cb;
1598 u->sink_input->detach = sink_input_detach_cb;
1599 u->sink_input->state_change = sink_input_state_change_cb;
1600 u->sink_input->may_move_to = sink_input_may_move_to_cb;
1601 u->sink_input->moving = sink_input_moving_cb;
1602 if (!use_volume_sharing)
1603 u->sink_input->volume_changed = sink_input_volume_changed_cb;
1604 u->sink_input->mute_changed = sink_input_mute_changed_cb;
1605 u->sink_input->userdata = u;
1606
1607 u->sink->input_to_master = u->sink_input;
1608
1609 pa_sink_input_get_silence(u->sink_input, &silence);
1610
1611 u->source_memblockq = pa_memblockq_new("module-echo-cancel source_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
1612 &source_ss, 1, 1, 0, &silence);
1613 u->sink_memblockq = pa_memblockq_new("module-echo-cancel sink_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
1614 &sink_ss, 1, 1, 0, &silence);
1615
1616 pa_memblock_unref(silence.memblock);
1617
1618 if (!u->source_memblockq || !u->sink_memblockq) {
1619 pa_log("Failed to create memblockq.");
1620 goto fail;
1621 }
1622
1623 /* our source and sink are not suspended when we create them */
1624 u->active_mask = 3;
1625
1626 if (u->adjust_time > 0)
1627 u->time_event = pa_core_rttime_new(m->core, pa_rtclock_now() + u->adjust_time, time_callback, u);
1628
1629 if (u->save_aec) {
1630 pa_log("Creating AEC files in /tmp");
1631 u->captured_file = fopen("/tmp/aec_rec.sw", "wb");
1632 if (u->captured_file == NULL)
1633 perror ("fopen failed");
1634 u->played_file = fopen("/tmp/aec_play.sw", "wb");
1635 if (u->played_file == NULL)
1636 perror ("fopen failed");
1637 u->canceled_file = fopen("/tmp/aec_out.sw", "wb");
1638 if (u->canceled_file == NULL)
1639 perror ("fopen failed");
1640 }
1641
1642 pa_sink_put(u->sink);
1643 pa_source_put(u->source);
1644
1645 pa_sink_input_put(u->sink_input);
1646 pa_source_output_put(u->source_output);
1647
1648 pa_modargs_free(ma);
1649
1650 return 0;
1651
1652 fail:
1653 if (ma)
1654 pa_modargs_free(ma);
1655
1656 pa__done(m);
1657
1658 return -1;
1659 }
1660
1661 int pa__get_n_used(pa_module *m) {
1662 struct userdata *u;
1663
1664 pa_assert(m);
1665 pa_assert_se(u = m->userdata);
1666
1667 return pa_sink_linked_by(u->sink) + pa_source_linked_by(u->source);
1668 }
1669
1670 void pa__done(pa_module*m) {
1671 struct userdata *u;
1672
1673 pa_assert(m);
1674
1675 if (!(u = m->userdata))
1676 return;
1677
1678 u->dead = TRUE;
1679
1680 /* See comments in source_output_kill_cb() above regarding
1681 * destruction order! */
1682
1683 if (u->time_event)
1684 u->core->mainloop->time_free(u->time_event);
1685
1686 if (u->source_output)
1687 pa_source_output_unlink(u->source_output);
1688 if (u->sink_input)
1689 pa_sink_input_unlink(u->sink_input);
1690
1691 if (u->source)
1692 pa_source_unlink(u->source);
1693 if (u->sink)
1694 pa_sink_unlink(u->sink);
1695
1696 if (u->source_output)
1697 pa_source_output_unref(u->source_output);
1698 if (u->sink_input)
1699 pa_sink_input_unref(u->sink_input);
1700
1701 if (u->source)
1702 pa_source_unref(u->source);
1703 if (u->sink)
1704 pa_sink_unref(u->sink);
1705
1706 if (u->source_memblockq)
1707 pa_memblockq_free(u->source_memblockq);
1708 if (u->sink_memblockq)
1709 pa_memblockq_free(u->sink_memblockq);
1710
1711 if (u->ec) {
1712 if (u->ec->done)
1713 u->ec->done(u->ec);
1714
1715 pa_xfree(u->ec);
1716 }
1717
1718 if (u->asyncmsgq)
1719 pa_asyncmsgq_unref(u->asyncmsgq);
1720
1721 pa_xfree(u);
1722 }