1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2010 Wim Taymans <wim.taymans@gmail.com>
5
6 Based on module-virtual-sink.c
7 module-virtual-source.c
8 module-loopback.c
9
10 Copyright 2010 Intel Corporation
11 Contributor: Pierre-Louis Bossart <pierre-louis.bossart@intel.com>
12
13 PulseAudio is free software; you can redistribute it and/or modify
14 it under the terms of the GNU Lesser General Public License as published
15 by the Free Software Foundation; either version 2.1 of the License,
16 or (at your option) any later version.
17
18 PulseAudio is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU Lesser General Public License
24 along with PulseAudio; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
26 USA.
27 ***/
28
29 #ifdef HAVE_CONFIG_H
30 #include <config.h>
31 #endif
32
33 #include <stdio.h>
34 #include <math.h>
35
36 #include "echo-cancel.h"
37
38 #include <pulse/xmalloc.h>
39 #include <pulse/timeval.h>
40 #include <pulse/rtclock.h>
41
42 #include <pulsecore/i18n.h>
43 #include <pulsecore/atomic.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/namereg.h>
46 #include <pulsecore/sink.h>
47 #include <pulsecore/module.h>
48 #include <pulsecore/core-rtclock.h>
49 #include <pulsecore/core-util.h>
50 #include <pulsecore/modargs.h>
51 #include <pulsecore/log.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/sample-util.h>
54 #include <pulsecore/ltdl-helper.h>
55
56 #include "module-echo-cancel-symdef.h"
57
58 PA_MODULE_AUTHOR("Wim Taymans");
59 PA_MODULE_DESCRIPTION("Echo Cancellation");
60 PA_MODULE_VERSION(PACKAGE_VERSION);
61 PA_MODULE_LOAD_ONCE(FALSE);
62 PA_MODULE_USAGE(
63 _("source_name=<name for the source> "
64 "source_properties=<properties for the source> "
65 "source_master=<name of source to filter> "
66 "sink_name=<name for the sink> "
67 "sink_properties=<properties for the sink> "
68 "sink_master=<name of sink to filter> "
69 "adjust_time=<how often to readjust rates in s> "
70 "adjust_threshold=<how much drift to readjust after in ms> "
71 "format=<sample format> "
72 "rate=<sample rate> "
73 "channels=<number of channels> "
74 "channel_map=<channel map> "
75 "aec_method=<implementation to use> "
76 "aec_args=<parameters for the AEC engine> "
77 "save_aec=<save AEC data in /tmp> "
78 "autoloaded=<set if this module is being loaded automatically> "
79 "use_volume_sharing=<yes or no> "
80 ));
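/* A typical load line (sketch only; the master device names are placeholders
 * and depend on the local setup):
 *
 *   load-module module-echo-cancel aec_method=webrtc source_master=<mic source> sink_master=<speaker sink>
 */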
81
82 /* NOTE: Make sure the enum and ec_table are maintained in the correct order */
83 typedef enum {
84 PA_ECHO_CANCELLER_INVALID = -1,
85 PA_ECHO_CANCELLER_NULL,
86 #ifdef HAVE_SPEEX
87 PA_ECHO_CANCELLER_SPEEX,
88 #endif
89 #ifdef HAVE_ADRIAN_EC
90 PA_ECHO_CANCELLER_ADRIAN,
91 #endif
92 #ifdef HAVE_WEBRTC
93 PA_ECHO_CANCELLER_WEBRTC,
94 #endif
95 } pa_echo_canceller_method_t;
96
97 #ifdef HAVE_WEBRTC
98 #define DEFAULT_ECHO_CANCELLER "webrtc"
99 #else
100 #define DEFAULT_ECHO_CANCELLER "speex"
101 #endif
102
103 static const pa_echo_canceller ec_table[] = {
104 {
105 /* Null, Dummy echo canceller (just copies data) */
106 .init = pa_null_ec_init,
107 .run = pa_null_ec_run,
108 .done = pa_null_ec_done,
109 },
110 #ifdef HAVE_SPEEX
111 {
112 /* Speex */
113 .init = pa_speex_ec_init,
114 .run = pa_speex_ec_run,
115 .done = pa_speex_ec_done,
116 },
117 #endif
118 #ifdef HAVE_ADRIAN_EC
119 {
120 /* Adrian Andre's NLMS implementation */
121 .init = pa_adrian_ec_init,
122 .run = pa_adrian_ec_run,
123 .done = pa_adrian_ec_done,
124 },
125 #endif
126 #ifdef HAVE_WEBRTC
127 {
128 /* WebRTC's audio processing engine */
129 .init = pa_webrtc_ec_init,
130 .play = pa_webrtc_ec_play,
131 .record = pa_webrtc_ec_record,
132 .set_drift = pa_webrtc_ec_set_drift,
133 .run = pa_webrtc_ec_run,
134 .done = pa_webrtc_ec_done,
135 },
136 #endif
137 };
138
139 #define DEFAULT_RATE 32000
140 #define DEFAULT_CHANNELS 1
141 #define DEFAULT_ADJUST_TIME_USEC (1*PA_USEC_PER_SEC)
142 #define DEFAULT_ADJUST_TOLERANCE (5*PA_USEC_PER_MSEC)
143 #define DEFAULT_SAVE_AEC FALSE
144 #define DEFAULT_AUTOLOADED FALSE
145
146 #define MEMBLOCKQ_MAXLENGTH (16*1024*1024)
147
148 /* Can only be used in main context */
149 #define IS_ACTIVE(u) ((pa_source_get_state((u)->source) == PA_SOURCE_RUNNING) && \
150 (pa_sink_get_state((u)->sink) == PA_SINK_RUNNING))
151
152 /* This module creates a new (virtual) source and sink.
153 *
154 * The data sent to the new sink is kept in a memblockq before being
155 * forwarded to the real sink_master.
156 *
157 * Data read from source_master is matched against the saved sink data and
158 * echo canceled data is then pushed onto the new source.
159 *
160 * Both source and sink masters have their own threads to push/pull data
161 * respectively. We however perform all our actions in the source IO thread.
162 * To do this we send all played samples to the source IO thread where they
163 * are then pushed into the memblockq.
164 *
165 * Alignment is performed in two steps:
166 *
 167 * 1) when something happens that requires quick adjustment of the alignment of
 168 * capture and playback samples, we perform a resync. This adjusts the
 169 * position in the playback memblock to the requested sample. Quick
 170 * adjustments include moving the playback samples before the capture
 171 * samples (because otherwise the echo canceller does not work) or when the
 172 * playback pointer drifts too far away.
 173 *
 174 * 2) periodically check the difference between capture and playback. We use a
 175 * low and high watermark for adjusting the alignment. Playback should always
 176 * be before capture and the difference should not be bigger than one frame
 177 * size. We would ideally like to resample the sink_input, but most drivers
 178 * don't give enough accuracy to be able to do that right now.
179 */
180
181 struct userdata;
182
183 struct pa_echo_canceller_msg {
184 pa_msgobject parent;
185 struct userdata *userdata;
186 };
187
188 PA_DEFINE_PRIVATE_CLASS(pa_echo_canceller_msg, pa_msgobject);
189 #define PA_ECHO_CANCELLER_MSG(o) (pa_echo_canceller_msg_cast(o))
190
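/* A matched pair of latency measurements, filled in by the source and sink
 * I/O threads and consumed by calc_diff() to estimate the offset between
 * capture and playback. */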
191 struct snapshot {
192 pa_usec_t sink_now;
193 pa_usec_t sink_latency;
194 size_t sink_delay;
195 int64_t send_counter;
196
197 pa_usec_t source_now;
198 pa_usec_t source_latency;
199 size_t source_delay;
200 int64_t recv_counter;
201 size_t rlen;
202 size_t plen;
203 };
204
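/* Per-instance module state, shared between the main thread and the source
 * and sink I/O threads. */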
205 struct userdata {
206 pa_core *core;
207 pa_module *module;
208
209 pa_bool_t autoloaded;
210 pa_bool_t dead;
211 pa_bool_t save_aec;
212
213 pa_echo_canceller *ec;
214 uint32_t source_blocksize;
215 uint32_t sink_blocksize;
216
217 pa_bool_t need_realign;
218
 219 /* to wake up the source I/O thread */
220 pa_asyncmsgq *asyncmsgq;
221 pa_rtpoll_item *rtpoll_item_read, *rtpoll_item_write;
222
223 pa_source *source;
224 pa_bool_t source_auto_desc;
225 pa_source_output *source_output;
 226 pa_memblockq *source_memblockq; /* echo canceller needs fixed-size chunks */
227 size_t source_skip;
228
229 pa_sink *sink;
230 pa_bool_t sink_auto_desc;
231 pa_sink_input *sink_input;
232 pa_memblockq *sink_memblockq;
233 int64_t send_counter; /* updated in sink IO thread */
234 int64_t recv_counter;
235 size_t sink_skip;
236
237 /* Bytes left over from previous iteration */
238 size_t sink_rem;
239 size_t source_rem;
240
241 pa_atomic_t request_resync;
242
243 pa_time_event *time_event;
244 pa_usec_t adjust_time;
245 int adjust_threshold;
246
247 FILE *captured_file;
248 FILE *played_file;
249 FILE *canceled_file;
250 FILE *drift_file;
251
252 pa_bool_t use_volume_sharing;
253
254 struct {
255 pa_cvolume current_volume;
256 } thread_info;
257 };
258
259 static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot);
260
261 static const char* const valid_modargs[] = {
262 "source_name",
263 "source_properties",
264 "source_master",
265 "sink_name",
266 "sink_properties",
267 "sink_master",
268 "adjust_time",
269 "adjust_threshold",
270 "format",
271 "rate",
272 "channels",
273 "channel_map",
274 "aec_method",
275 "aec_args",
276 "save_aec",
277 "autoloaded",
278 "use_volume_sharing",
279 NULL
280 };
281
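/* Messages processed in the source output I/O thread. POST and REWIND are
 * posted from the sink input thread; the snapshot and diff-time messages are
 * sent from the main thread's timer callback. */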
282 enum {
283 SOURCE_OUTPUT_MESSAGE_POST = PA_SOURCE_OUTPUT_MESSAGE_MAX,
284 SOURCE_OUTPUT_MESSAGE_REWIND,
285 SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT,
286 SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME
287 };
288
289 enum {
290 SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT
291 };
292
293 enum {
294 ECHO_CANCELLER_MESSAGE_SET_VOLUME,
295 };
296
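/* Returns the estimated time offset between the playback and capture paths;
 * a negative value means recording is running ahead of playback (see
 * time_callback() below for how the result is used). */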
297 static int64_t calc_diff(struct userdata *u, struct snapshot *snapshot) {
298 int64_t diff_time, buffer_latency;
299 pa_usec_t plen, rlen, source_delay, sink_delay, recv_counter, send_counter;
300
301 /* get latency difference between playback and record */
302 plen = pa_bytes_to_usec(snapshot->plen, &u->sink_input->sample_spec);
303 rlen = pa_bytes_to_usec(snapshot->rlen, &u->source_output->sample_spec);
304 if (plen > rlen)
305 buffer_latency = plen - rlen;
306 else
307 buffer_latency = 0;
308
309 source_delay = pa_bytes_to_usec(snapshot->source_delay, &u->source_output->sample_spec);
310 sink_delay = pa_bytes_to_usec(snapshot->sink_delay, &u->sink_input->sample_spec);
311 buffer_latency += source_delay + sink_delay;
312
313 /* add the latency difference due to samples not yet transferred */
314 send_counter = pa_bytes_to_usec(snapshot->send_counter, &u->sink_input->sample_spec);
315 recv_counter = pa_bytes_to_usec(snapshot->recv_counter, &u->source_output->sample_spec);
316 if (recv_counter <= send_counter)
317 buffer_latency += (int64_t) (send_counter - recv_counter);
318 else
319 buffer_latency += PA_CLIP_SUB(buffer_latency, (int64_t) (recv_counter - send_counter));
320
321 /* capture and playback are perfectly aligned when diff_time is 0 */
322 diff_time = (snapshot->sink_now + snapshot->sink_latency - buffer_latency) -
323 (snapshot->source_now - snapshot->source_latency);
324
325 pa_log_debug("Diff %lld (%lld - %lld + %lld) %lld %lld %lld %lld", (long long) diff_time,
326 (long long) snapshot->sink_latency,
327 (long long) buffer_latency, (long long) snapshot->source_latency,
328 (long long) source_delay, (long long) sink_delay,
329 (long long) (send_counter - recv_counter),
330 (long long) (snapshot->sink_now - snapshot->source_now));
331
332 return diff_time;
333 }
334
335 /* Called from main context */
336 static void time_callback(pa_mainloop_api *a, pa_time_event *e, const struct timeval *t, void *userdata) {
337 struct userdata *u = userdata;
338 uint32_t old_rate, base_rate, new_rate;
339 int64_t diff_time;
340 /*size_t fs*/
341 struct snapshot latency_snapshot;
342
343 pa_assert(u);
344 pa_assert(a);
345 pa_assert(u->time_event == e);
346 pa_assert_ctl_context();
347
348 if (!IS_ACTIVE(u))
349 return;
350
351 /* update our snapshots */
352 pa_asyncmsgq_send(u->source_output->source->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
353 pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
354
355 /* calculate drift between capture and playback */
356 diff_time = calc_diff(u, &latency_snapshot);
357
358 /*fs = pa_frame_size(&u->source_output->sample_spec);*/
359 old_rate = u->sink_input->sample_spec.rate;
360 base_rate = u->source_output->sample_spec.rate;
361
362 if (diff_time < 0) {
363 /* recording before playback, we need to adjust quickly. The echo
364 * canceler does not work in this case. */
365 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
366 NULL, diff_time, NULL, NULL);
367 /*new_rate = base_rate - ((pa_usec_to_bytes(-diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/
368 new_rate = base_rate;
369 }
370 else {
371 if (diff_time > u->adjust_threshold) {
372 /* diff too big, quickly adjust */
373 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
374 NULL, diff_time, NULL, NULL);
375 }
376
377 /* recording behind playback, we need to slowly adjust the rate to match */
378 /*new_rate = base_rate + ((pa_usec_to_bytes(diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/
379
380 /* assume equal samplerates for now */
381 new_rate = base_rate;
382 }
383
384 /* make sure we don't make too big adjustments because that sounds horrible */
385 if (new_rate > base_rate * 1.1 || new_rate < base_rate * 0.9)
386 new_rate = base_rate;
387
388 if (new_rate != old_rate) {
389 pa_log_info("Old rate %lu Hz, new rate %lu Hz", (unsigned long) old_rate, (unsigned long) new_rate);
390
391 pa_sink_input_set_rate(u->sink_input, new_rate);
392 }
393
394 pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
395 }
396
397 /* Called from source I/O thread context */
398 static int source_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
399 struct userdata *u = PA_SOURCE(o)->userdata;
400
401 switch (code) {
402
403 case PA_SOURCE_MESSAGE_GET_LATENCY:
404
405 /* The source is _put() before the source output is, so let's
406 * make sure we don't access it in that time. Also, the
407 * source output is first shut down, the source second. */
408 if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
409 !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state)) {
410 *((pa_usec_t*) data) = 0;
411 return 0;
412 }
413
414 *((pa_usec_t*) data) =
415
416 /* Get the latency of the master source */
417 pa_source_get_latency_within_thread(u->source_output->source) +
418 /* Add the latency internal to our source output on top */
419 pa_bytes_to_usec(pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq), &u->source_output->source->sample_spec) +
420 /* and the buffering we do on the source */
421 pa_bytes_to_usec(u->source_blocksize, &u->source_output->source->sample_spec);
422
423 return 0;
424
425 case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:
426 u->thread_info.current_volume = u->source->reference_volume;
427 break;
428 }
429
430 return pa_source_process_msg(o, code, data, offset, chunk);
431 }
432
433 /* Called from sink I/O thread context */
434 static int sink_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
435 struct userdata *u = PA_SINK(o)->userdata;
436
437 switch (code) {
438
439 case PA_SINK_MESSAGE_GET_LATENCY:
440
441 /* The sink is _put() before the sink input is, so let's
442 * make sure we don't access it in that time. Also, the
443 * sink input is first shut down, the sink second. */
444 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
445 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) {
446 *((pa_usec_t*) data) = 0;
447 return 0;
448 }
449
450 *((pa_usec_t*) data) =
451
452 /* Get the latency of the master sink */
453 pa_sink_get_latency_within_thread(u->sink_input->sink) +
454
455 /* Add the latency internal to our sink input on top */
456 pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq), &u->sink_input->sink->sample_spec);
457
458 return 0;
459 }
460
461 return pa_sink_process_msg(o, code, data, offset, chunk);
462 }
463
464
465 /* Called from main context */
466 static int source_set_state_cb(pa_source *s, pa_source_state_t state) {
467 struct userdata *u;
468
469 pa_source_assert_ref(s);
470 pa_assert_se(u = s->userdata);
471
472 if (!PA_SOURCE_IS_LINKED(state) ||
473 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
474 return 0;
475
476 if (state == PA_SOURCE_RUNNING) {
477 /* restart timer when both sink and source are active */
478 if (IS_ACTIVE(u) && u->adjust_time)
479 pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
480
481 pa_atomic_store(&u->request_resync, 1);
482 pa_source_output_cork(u->source_output, FALSE);
483 } else if (state == PA_SOURCE_SUSPENDED) {
484 pa_source_output_cork(u->source_output, TRUE);
485 }
486
487 return 0;
488 }
489
490 /* Called from main context */
491 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t state) {
492 struct userdata *u;
493
494 pa_sink_assert_ref(s);
495 pa_assert_se(u = s->userdata);
496
497 if (!PA_SINK_IS_LINKED(state) ||
498 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
499 return 0;
500
501 if (state == PA_SINK_RUNNING) {
502 /* restart timer when both sink and source are active */
503 if (IS_ACTIVE(u) && u->adjust_time)
504 pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
505
506 pa_atomic_store(&u->request_resync, 1);
507 pa_sink_input_cork(u->sink_input, FALSE);
508 } else if (state == PA_SINK_SUSPENDED) {
509 pa_sink_input_cork(u->sink_input, TRUE);
510 }
511
512 return 0;
513 }
514
515 /* Called from source I/O thread context */
516 static void source_update_requested_latency_cb(pa_source *s) {
517 struct userdata *u;
518
519 pa_source_assert_ref(s);
520 pa_assert_se(u = s->userdata);
521
522 if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
523 !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state))
524 return;
525
526 pa_log_debug("Source update requested latency");
527
528 /* Just hand this one over to the master source */
529 pa_source_output_set_requested_latency_within_thread(
530 u->source_output,
531 pa_source_get_requested_latency_within_thread(s));
532 }
533
534 /* Called from sink I/O thread context */
535 static void sink_update_requested_latency_cb(pa_sink *s) {
536 struct userdata *u;
537
538 pa_sink_assert_ref(s);
539 pa_assert_se(u = s->userdata);
540
541 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
542 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
543 return;
544
545 pa_log_debug("Sink update requested latency");
546
547 /* Just hand this one over to the master sink */
548 pa_sink_input_set_requested_latency_within_thread(
549 u->sink_input,
550 pa_sink_get_requested_latency_within_thread(s));
551 }
552
553 /* Called from sink I/O thread context */
554 static void sink_request_rewind_cb(pa_sink *s) {
555 struct userdata *u;
556
557 pa_sink_assert_ref(s);
558 pa_assert_se(u = s->userdata);
559
560 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
561 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
562 return;
563
564 pa_log_debug("Sink request rewind %lld", (long long) s->thread_info.rewind_nbytes);
565
566 /* Just hand this one over to the master sink */
567 pa_sink_input_request_rewind(u->sink_input,
568 s->thread_info.rewind_nbytes, TRUE, FALSE, FALSE);
569 }
570
571 /* Called from main context */
572 static void source_set_volume_cb(pa_source *s) {
573 struct userdata *u;
574
575 pa_source_assert_ref(s);
576 pa_assert_se(u = s->userdata);
577
578 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
579 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
580 return;
581
582 pa_source_output_set_volume(u->source_output, &s->real_volume, s->save_volume, TRUE);
583 }
584
585 /* Called from main context */
586 static void sink_set_volume_cb(pa_sink *s) {
587 struct userdata *u;
588
589 pa_sink_assert_ref(s);
590 pa_assert_se(u = s->userdata);
591
592 if (!PA_SINK_IS_LINKED(pa_sink_get_state(s)) ||
593 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
594 return;
595
596 pa_sink_input_set_volume(u->sink_input, &s->real_volume, s->save_volume, TRUE);
597 }
598
599 /* Called from main context. */
600 static void source_get_volume_cb(pa_source *s) {
601 struct userdata *u;
602 pa_cvolume v;
603
604 pa_source_assert_ref(s);
605 pa_assert_se(u = s->userdata);
606
607 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
608 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
609 return;
610
611 pa_source_output_get_volume(u->source_output, &v, TRUE);
612
613 if (pa_cvolume_equal(&s->real_volume, &v))
614 /* no change */
615 return;
616
617 s->real_volume = v;
618 pa_source_set_soft_volume(s, NULL);
619 }
620
621 /* Called from main context */
622 static void source_set_mute_cb(pa_source *s) {
623 struct userdata *u;
624
625 pa_source_assert_ref(s);
626 pa_assert_se(u = s->userdata);
627
628 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
629 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
630 return;
631
632 pa_source_output_set_mute(u->source_output, s->muted, s->save_muted);
633 }
634
635 /* Called from main context */
636 static void sink_set_mute_cb(pa_sink *s) {
637 struct userdata *u;
638
639 pa_sink_assert_ref(s);
640 pa_assert_se(u = s->userdata);
641
642 if (!PA_SINK_IS_LINKED(pa_sink_get_state(s)) ||
643 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
644 return;
645
646 pa_sink_input_set_mute(u->sink_input, s->muted, s->save_muted);
647 }
648
649 /* Called from main context */
650 static void source_get_mute_cb(pa_source *s) {
651 struct userdata *u;
652
653 pa_source_assert_ref(s);
654 pa_assert_se(u = s->userdata);
655
656 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
657 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
658 return;
659
660 pa_source_output_get_mute(u->source_output);
661 }
662
663 /* Called from source I/O thread context. */
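/* A negative diff_time means recording is ahead of playback, so sink samples
 * are skipped to let playback catch up; a positive diff_time means playback
 * is too far ahead, so source samples are skipped instead. */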
664 static void apply_diff_time(struct userdata *u, int64_t diff_time) {
665 int64_t diff;
666
667 if (diff_time < 0) {
668 diff = pa_usec_to_bytes(-diff_time, &u->sink_input->sample_spec);
669
670 if (diff > 0) {
671 /* add some extra safety samples to compensate for jitter in the
672 * timings */
673 diff += 10 * pa_frame_size (&u->sink_input->sample_spec);
674
675 pa_log("Playback after capture (%lld), drop sink %lld", (long long) diff_time, (long long) diff);
676
677 u->sink_skip = diff;
678 u->source_skip = 0;
679 }
680 } else if (diff_time > 0) {
681 diff = pa_usec_to_bytes(diff_time, &u->source_output->sample_spec);
682
683 if (diff > 0) {
684 pa_log("Playback too far ahead (%lld), drop source %lld", (long long) diff_time, (long long) diff);
685
686 u->source_skip = diff;
687 u->sink_skip = 0;
688 }
689 }
690 }
691
692 /* Called from source I/O thread context. */
693 static void do_resync(struct userdata *u) {
694 int64_t diff_time;
695 struct snapshot latency_snapshot;
696
697 pa_log("Doing resync");
698
699 /* update our snapshot */
700 source_output_snapshot_within_thread(u, &latency_snapshot);
701 pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
702
703 /* calculate drift between capture and playback */
704 diff_time = calc_diff(u, &latency_snapshot);
705
706 /* and adjust for the drift */
707 apply_diff_time(u, diff_time);
708 }
709
710 /* 1. Calculate drift at this point, pass to canceller
711 * 2. Push out playback samples in blocksize chunks
712 * 3. Push out capture samples in blocksize chunks
713 * 4. ???
714 * 5. Profit
715 *
716 * Called from source I/O thread context.
717 */
718 static void do_push_drift_comp(struct userdata *u) {
719 size_t rlen, plen;
720 pa_memchunk rchunk, pchunk, cchunk;
721 uint8_t *rdata, *pdata, *cdata;
722 float drift;
723 int unused PA_GCC_UNUSED;
724
725 rlen = pa_memblockq_get_length(u->source_memblockq);
726 plen = pa_memblockq_get_length(u->sink_memblockq);
727
728 /* Estimate snapshot drift as follows:
 729 * pd: amount of playback data consumed since last time
 730 * rd: amount of record data consumed since last time
 731 *
 732 * drift = (pd - rd) / rd;
 733 *
 734 * We calculate pd and rd as the memblockq length less the number of
 735 * samples left over from the last iteration (to avoid double counting
 736 * those remainder samples).
737 */
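/* For illustration: if, since the last iteration, 960 new playback bytes and
 * 1000 new capture bytes arrived, drift = (960 - 1000) / 1000 = -0.04, i.e.
 * the playback side delivered roughly 4% less data than the capture side
 * over that interval. */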
738 drift = ((float)(plen - u->sink_rem) - (rlen - u->source_rem)) / ((float)(rlen - u->source_rem));
739 u->sink_rem = plen % u->sink_blocksize;
740 u->source_rem = rlen % u->source_blocksize;
741
742 /* Now let the canceller work its drift compensation magic */
743 u->ec->set_drift(u->ec, drift);
744
745 if (u->save_aec) {
746 if (u->drift_file)
747 fprintf(u->drift_file, "d %a\n", drift);
748 }
749
750 /* Send in the playback samples first */
751 while (plen >= u->sink_blocksize) {
752 pa_memblockq_peek_fixed_size(u->sink_memblockq, u->sink_blocksize, &pchunk);
753 pdata = pa_memblock_acquire(pchunk.memblock);
754 pdata += pchunk.index;
755
756 u->ec->play(u->ec, pdata);
757
758 if (u->save_aec) {
759 if (u->drift_file)
760 fprintf(u->drift_file, "p %d\n", u->sink_blocksize);
761 if (u->played_file)
762 unused = fwrite(pdata, 1, u->sink_blocksize, u->played_file);
763 }
764
765 pa_memblock_release(pchunk.memblock);
766 pa_memblockq_drop(u->sink_memblockq, u->sink_blocksize);
767 pa_memblock_unref(pchunk.memblock);
768
769 plen -= u->sink_blocksize;
770 }
771
772 /* And now the capture samples */
773 while (rlen >= u->source_blocksize) {
774 pa_memblockq_peek_fixed_size(u->source_memblockq, u->source_blocksize, &rchunk);
775
776 rdata = pa_memblock_acquire(rchunk.memblock);
777 rdata += rchunk.index;
778
779 cchunk.index = 0;
780 cchunk.length = u->source_blocksize;
781 cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length);
782 cdata = pa_memblock_acquire(cchunk.memblock);
783
784 u->ec->record(u->ec, rdata, cdata);
785
786 if (u->save_aec) {
787 if (u->drift_file)
788 fprintf(u->drift_file, "c %d\n", u->source_blocksize);
789 if (u->captured_file)
790 unused = fwrite(rdata, 1, u->source_blocksize, u->captured_file);
791 if (u->canceled_file)
792 unused = fwrite(cdata, 1, u->source_blocksize, u->canceled_file);
793 }
794
795 pa_memblock_release(cchunk.memblock);
796 pa_memblock_release(rchunk.memblock);
797
798 pa_memblock_unref(rchunk.memblock);
799
800 pa_source_post(u->source, &cchunk);
801 pa_memblock_unref(cchunk.memblock);
802
803 pa_memblockq_drop(u->source_memblockq, u->source_blocksize);
804 rlen -= u->source_blocksize;
805 }
806 }
807
808 /* This one's simpler than the drift compensation case -- we just iterate over
809 * the capture buffer, and pass the canceller blocksize bytes of playback and
810 * capture data.
811 *
812 * Called from source I/O thread context. */
813 static void do_push(struct userdata *u) {
814 size_t rlen, plen;
815 pa_memchunk rchunk, pchunk, cchunk;
816 uint8_t *rdata, *pdata, *cdata;
817 int unused PA_GCC_UNUSED;
818
819 rlen = pa_memblockq_get_length(u->source_memblockq);
820 plen = pa_memblockq_get_length(u->sink_memblockq);
821
822 while (rlen >= u->source_blocksize) {
823
824 /* take fixed blocks from recorded and played samples */
825 pa_memblockq_peek_fixed_size(u->source_memblockq, u->source_blocksize, &rchunk);
826 pa_memblockq_peek_fixed_size(u->sink_memblockq, u->sink_blocksize, &pchunk);
827
828 /* we ran out of played data and pchunk has been filled with silence bytes */
829 if (plen < u->sink_blocksize)
830 pa_memblockq_seek(u->sink_memblockq, u->sink_blocksize - plen, PA_SEEK_RELATIVE, true);
831
832 rdata = pa_memblock_acquire(rchunk.memblock);
833 rdata += rchunk.index;
834 pdata = pa_memblock_acquire(pchunk.memblock);
835 pdata += pchunk.index;
836
837 cchunk.index = 0;
838 cchunk.length = u->source_blocksize;
839 cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length);
840 cdata = pa_memblock_acquire(cchunk.memblock);
841
842 if (u->save_aec) {
843 if (u->captured_file)
844 unused = fwrite(rdata, 1, u->source_blocksize, u->captured_file);
845 if (u->played_file)
846 unused = fwrite(pdata, 1, u->sink_blocksize, u->played_file);
847 }
848
849 /* perform echo cancellation */
850 u->ec->run(u->ec, rdata, pdata, cdata);
851
852 if (u->save_aec) {
853 if (u->canceled_file)
854 unused = fwrite(cdata, 1, u->source_blocksize, u->canceled_file);
855 }
856
857 pa_memblock_release(cchunk.memblock);
858 pa_memblock_release(pchunk.memblock);
859 pa_memblock_release(rchunk.memblock);
860
861 /* drop consumed source samples */
862 pa_memblockq_drop(u->source_memblockq, u->source_blocksize);
863 pa_memblock_unref(rchunk.memblock);
864 rlen -= u->source_blocksize;
865
866 /* drop consumed sink samples */
867 pa_memblockq_drop(u->sink_memblockq, u->sink_blocksize);
868 pa_memblock_unref(pchunk.memblock);
869
870 if (plen >= u->sink_blocksize)
871 plen -= u->sink_blocksize;
872 else
873 plen = 0;
874
875 /* forward the (echo-canceled) data to the virtual source */
876 pa_source_post(u->source, &cchunk);
877 pa_memblock_unref(cchunk.memblock);
878 }
879 }
880
881 /* Called from source I/O thread context. */
882 static void source_output_push_cb(pa_source_output *o, const pa_memchunk *chunk) {
883 struct userdata *u;
884 size_t rlen, plen, to_skip;
885 pa_memchunk rchunk;
886
887 pa_source_output_assert_ref(o);
888 pa_source_output_assert_io_context(o);
889 pa_assert_se(u = o->userdata);
890
891 if (!PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output))) {
892 pa_log("Push when no link?");
893 return;
894 }
895
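/* If either our virtual source or sink is not in the RUNNING state, skip
 * processing and just forward the captured data to the source as-is. */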
896 if (PA_UNLIKELY(u->source->thread_info.state != PA_SOURCE_RUNNING ||
897 u->sink->thread_info.state != PA_SINK_RUNNING)) {
898 pa_source_post(u->source, chunk);
899 return;
900 }
901
902 /* handle queued messages, do any message sending of our own */
903 while (pa_asyncmsgq_process_one(u->asyncmsgq) > 0)
904 ;
905
906 pa_memblockq_push_align(u->source_memblockq, chunk);
907
908 rlen = pa_memblockq_get_length(u->source_memblockq);
909 plen = pa_memblockq_get_length(u->sink_memblockq);
910
911 /* Let's not do anything else till we have enough data to process */
912 if (rlen < u->source_blocksize)
913 return;
914
915 /* See if we need to drop samples in order to sync */
916 if (pa_atomic_cmpxchg (&u->request_resync, 1, 0)) {
917 do_resync(u);
918 }
919
920 /* Okay, skip cancellation for skipped source samples if needed. */
921 if (PA_UNLIKELY(u->source_skip)) {
922 /* The slightly tricky bit here is that we drop all but modulo
923 * blocksize bytes and then adjust for that last bit on the sink side.
924 * We do this because the source data is coming at a fixed rate, which
 925 * means the only way to try to catch up is to drop sink samples and let
 926 * the canceller cope with this. */
927 to_skip = rlen >= u->source_skip ? u->source_skip : rlen;
928 to_skip -= to_skip % u->source_blocksize;
929
930 if (to_skip) {
931 pa_memblockq_peek_fixed_size(u->source_memblockq, to_skip, &rchunk);
932 pa_source_post(u->source, &rchunk);
933
934 pa_memblock_unref(rchunk.memblock);
935 pa_memblockq_drop(u->source_memblockq, to_skip);
936
937 rlen -= to_skip;
938 u->source_skip -= to_skip;
939 }
940
941 if (rlen && u->source_skip % u->source_blocksize) {
942 u->sink_skip += (uint64_t) (u->source_blocksize - (u->source_skip % u->source_blocksize)) * u->sink_blocksize / u->source_blocksize;
943 u->source_skip -= (u->source_skip % u->source_blocksize);
944 }
945 }
946
947 /* And for the sink, these samples have been played back already, so we can
948 * just drop them and get on with it. */
949 if (PA_UNLIKELY(u->sink_skip)) {
950 to_skip = plen >= u->sink_skip ? u->sink_skip : plen;
951
952 pa_memblockq_drop(u->sink_memblockq, to_skip);
953
954 plen -= to_skip;
955 u->sink_skip -= to_skip;
956 }
957
958 /* process and push out samples */
959 if (u->ec->params.drift_compensation)
960 do_push_drift_comp(u);
961 else
962 do_push(u);
963 }
964
965 /* Called from sink I/O thread context. */
966 static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
967 struct userdata *u;
968
969 pa_sink_input_assert_ref(i);
970 pa_assert(chunk);
971 pa_assert_se(u = i->userdata);
972
973 if (u->sink->thread_info.rewind_requested)
974 pa_sink_process_rewind(u->sink, 0);
975
976 pa_sink_render_full(u->sink, nbytes, chunk);
977
978 if (i->thread_info.underrun_for > 0) {
979 pa_log_debug("Handling end of underrun.");
980 pa_atomic_store(&u->request_resync, 1);
981 }
982
983 /* let source thread handle the chunk. pass the sample count as well so that
984 * the source IO thread can update the right variables. */
985 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_POST,
986 NULL, 0, chunk, NULL);
987 u->send_counter += chunk->length;
988
989 return 0;
990 }
991
992 /* Called from source I/O thread context. */
993 static void source_output_process_rewind_cb(pa_source_output *o, size_t nbytes) {
994 struct userdata *u;
995
996 pa_source_output_assert_ref(o);
997 pa_source_output_assert_io_context(o);
998 pa_assert_se(u = o->userdata);
999
1000 pa_source_process_rewind(u->source, nbytes);
1001
1002 /* go back on read side, we need to use older sink data for this */
1003 pa_memblockq_rewind(u->sink_memblockq, nbytes);
1004
1005 /* manipulate write index */
1006 pa_memblockq_seek(u->source_memblockq, -nbytes, PA_SEEK_RELATIVE, TRUE);
1007
1008 pa_log_debug("Source rewind (%lld) %lld", (long long) nbytes,
1009 (long long) pa_memblockq_get_length (u->source_memblockq));
1010 }
1011
1012 /* Called from sink I/O thread context. */
1013 static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
1014 struct userdata *u;
1015
1016 pa_sink_input_assert_ref(i);
1017 pa_assert_se(u = i->userdata);
1018
1019 pa_log_debug("Sink process rewind %lld", (long long) nbytes);
1020
1021 pa_sink_process_rewind(u->sink, nbytes);
1022
1023 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_REWIND, NULL, (int64_t) nbytes, NULL, NULL);
1024 u->send_counter -= nbytes;
1025 }
1026
1027 /* Called from source I/O thread context. */
1028 static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot) {
1029 size_t delay, rlen, plen;
1030 pa_usec_t now, latency;
1031
1032 now = pa_rtclock_now();
1033 latency = pa_source_get_latency_within_thread(u->source_output->source);
1034 delay = pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq);
1035
1036 delay = (u->source_output->thread_info.resampler ? pa_resampler_request(u->source_output->thread_info.resampler, delay) : delay);
1037 rlen = pa_memblockq_get_length(u->source_memblockq);
1038 plen = pa_memblockq_get_length(u->sink_memblockq);
1039
1040 snapshot->source_now = now;
1041 snapshot->source_latency = latency;
1042 snapshot->source_delay = delay;
1043 snapshot->recv_counter = u->recv_counter;
1044 snapshot->rlen = rlen + u->sink_skip;
1045 snapshot->plen = plen + u->source_skip;
1046 }
1047
1048 /* Called from source I/O thread context. */
1049 static int source_output_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1050 struct userdata *u = PA_SOURCE_OUTPUT(obj)->userdata;
1051
1052 switch (code) {
1053
1054 case SOURCE_OUTPUT_MESSAGE_POST:
1055
1056 pa_source_output_assert_io_context(u->source_output);
1057
1058 if (u->source_output->source->thread_info.state == PA_SOURCE_RUNNING)
1059 pa_memblockq_push_align(u->sink_memblockq, chunk);
1060 else
1061 pa_memblockq_flush_write(u->sink_memblockq, TRUE);
1062
1063 u->recv_counter += (int64_t) chunk->length;
1064
1065 return 0;
1066
1067 case SOURCE_OUTPUT_MESSAGE_REWIND:
1068 pa_source_output_assert_io_context(u->source_output);
1069
1070 /* manipulate write index, never go past what we have */
1071 if (PA_SOURCE_IS_OPENED(u->source_output->source->thread_info.state))
1072 pa_memblockq_seek(u->sink_memblockq, -offset, PA_SEEK_RELATIVE, TRUE);
1073 else
1074 pa_memblockq_flush_write(u->sink_memblockq, TRUE);
1075
1076 pa_log_debug("Sink rewind (%lld)", (long long) offset);
1077
1078 u->recv_counter -= offset;
1079
1080 return 0;
1081
1082 case SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT: {
1083 struct snapshot *snapshot = (struct snapshot *) data;
1084
1085 source_output_snapshot_within_thread(u, snapshot);
1086 return 0;
1087 }
1088
1089 case SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME:
1090 apply_diff_time(u, offset);
1091 return 0;
1092
1093 }
1094
1095 return pa_source_output_process_msg(obj, code, data, offset, chunk);
1096 }
1097
1098 /* Called from sink I/O thread context. */
1099 static int sink_input_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1100 struct userdata *u = PA_SINK_INPUT(obj)->userdata;
1101
1102 switch (code) {
1103
1104 case SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT: {
1105 size_t delay;
1106 pa_usec_t now, latency;
1107 struct snapshot *snapshot = (struct snapshot *) data;
1108
1109 pa_sink_input_assert_io_context(u->sink_input);
1110
1111 now = pa_rtclock_now();
1112 latency = pa_sink_get_latency_within_thread(u->sink_input->sink);
1113 delay = pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq);
1114
1115 delay = (u->sink_input->thread_info.resampler ? pa_resampler_request(u->sink_input->thread_info.resampler, delay) : delay);
1116
1117 snapshot->sink_now = now;
1118 snapshot->sink_latency = latency;
1119 snapshot->sink_delay = delay;
1120 snapshot->send_counter = u->send_counter;
1121 return 0;
1122 }
1123 }
1124
1125 return pa_sink_input_process_msg(obj, code, data, offset, chunk);
1126 }
1127
1128 /* Called from sink I/O thread context. */
1129 static void sink_input_update_max_rewind_cb(pa_sink_input *i, size_t nbytes) {
1130 struct userdata *u;
1131
1132 pa_sink_input_assert_ref(i);
1133 pa_assert_se(u = i->userdata);
1134
1135 pa_log_debug("Sink input update max rewind %lld", (long long) nbytes);
1136
1137 /* FIXME: Too small max_rewind:
1138 * https://bugs.freedesktop.org/show_bug.cgi?id=53709 */
1139 pa_memblockq_set_maxrewind(u->sink_memblockq, nbytes);
1140 pa_sink_set_max_rewind_within_thread(u->sink, nbytes);
1141 }
1142
1143 /* Called from source I/O thread context. */
1144 static void source_output_update_max_rewind_cb(pa_source_output *o, size_t nbytes) {
1145 struct userdata *u;
1146
1147 pa_source_output_assert_ref(o);
1148 pa_assert_se(u = o->userdata);
1149
1150 pa_log_debug("Source output update max rewind %lld", (long long) nbytes);
1151
1152 pa_source_set_max_rewind_within_thread(u->source, nbytes);
1153 }
1154
1155 /* Called from sink I/O thread context. */
1156 static void sink_input_update_max_request_cb(pa_sink_input *i, size_t nbytes) {
1157 struct userdata *u;
1158
1159 pa_sink_input_assert_ref(i);
1160 pa_assert_se(u = i->userdata);
1161
1162 pa_log_debug("Sink input update max request %lld", (long long) nbytes);
1163
1164 pa_sink_set_max_request_within_thread(u->sink, nbytes);
1165 }
1166
1167 /* Called from sink I/O thread context. */
1168 static void sink_input_update_sink_requested_latency_cb(pa_sink_input *i) {
1169 struct userdata *u;
1170 pa_usec_t latency;
1171
1172 pa_sink_input_assert_ref(i);
1173 pa_assert_se(u = i->userdata);
1174
1175 latency = pa_sink_get_requested_latency_within_thread(i->sink);
1176
1177 pa_log_debug("Sink input update requested latency %lld", (long long) latency);
1178 }
1179
1180 /* Called from source I/O thread context. */
1181 static void source_output_update_source_requested_latency_cb(pa_source_output *o) {
1182 struct userdata *u;
1183 pa_usec_t latency;
1184
1185 pa_source_output_assert_ref(o);
1186 pa_assert_se(u = o->userdata);
1187
1188 latency = pa_source_get_requested_latency_within_thread(o->source);
1189
1190 pa_log_debug("Source output update requested latency %lld", (long long) latency);
1191 }
1192
1193 /* Called from sink I/O thread context. */
1194 static void sink_input_update_sink_latency_range_cb(pa_sink_input *i) {
1195 struct userdata *u;
1196
1197 pa_sink_input_assert_ref(i);
1198 pa_assert_se(u = i->userdata);
1199
1200 pa_log_debug("Sink input update latency range %lld %lld",
1201 (long long) i->sink->thread_info.min_latency,
1202 (long long) i->sink->thread_info.max_latency);
1203
1204 pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);
1205 }
1206
1207 /* Called from source I/O thread context. */
1208 static void source_output_update_source_latency_range_cb(pa_source_output *o) {
1209 struct userdata *u;
1210
1211 pa_source_output_assert_ref(o);
1212 pa_assert_se(u = o->userdata);
1213
1214 pa_log_debug("Source output update latency range %lld %lld",
1215 (long long) o->source->thread_info.min_latency,
1216 (long long) o->source->thread_info.max_latency);
1217
1218 pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
1219 }
1220
1221 /* Called from sink I/O thread context. */
1222 static void sink_input_update_sink_fixed_latency_cb(pa_sink_input *i) {
1223 struct userdata *u;
1224
1225 pa_sink_input_assert_ref(i);
1226 pa_assert_se(u = i->userdata);
1227
1228 pa_log_debug("Sink input update fixed latency %lld",
1229 (long long) i->sink->thread_info.fixed_latency);
1230
1231 pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);
1232 }
1233
1234 /* Called from source I/O thread context. */
1235 static void source_output_update_source_fixed_latency_cb(pa_source_output *o) {
1236 struct userdata *u;
1237
1238 pa_source_output_assert_ref(o);
1239 pa_assert_se(u = o->userdata);
1240
1241 pa_log_debug("Source output update fixed latency %lld",
1242 (long long) o->source->thread_info.fixed_latency);
1243
1244 pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
1245 }
1246
1247 /* Called from source I/O thread context. */
1248 static void source_output_attach_cb(pa_source_output *o) {
1249 struct userdata *u;
1250
1251 pa_source_output_assert_ref(o);
1252 pa_source_output_assert_io_context(o);
1253 pa_assert_se(u = o->userdata);
1254
1255 pa_source_set_rtpoll(u->source, o->source->thread_info.rtpoll);
1256 pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
1257 pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
1258 pa_source_set_max_rewind_within_thread(u->source, pa_source_output_get_max_rewind(o));
1259
1260 pa_log_debug("Source output %d attach", o->index);
1261
1262 pa_source_attach_within_thread(u->source);
1263
1264 u->rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
1265 o->source->thread_info.rtpoll,
1266 PA_RTPOLL_LATE,
1267 u->asyncmsgq);
1268 }
1269
1270 /* Called from sink I/O thread context. */
1271 static void sink_input_attach_cb(pa_sink_input *i) {
1272 struct userdata *u;
1273
1274 pa_sink_input_assert_ref(i);
1275 pa_assert_se(u = i->userdata);
1276
1277 pa_sink_set_rtpoll(u->sink, i->sink->thread_info.rtpoll);
1278 pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);
1279
1280 /* (8.1) IF YOU NEED A FIXED BLOCK SIZE ADD THE LATENCY FOR ONE
1281 * BLOCK MINUS ONE SAMPLE HERE. SEE (7) */
1282 pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);
1283
1284 /* (8.2) IF YOU NEED A FIXED BLOCK SIZE ROUND
1285 * pa_sink_input_get_max_request(i) UP TO MULTIPLES OF IT
1286 * HERE. SEE (6) */
1287 pa_sink_set_max_request_within_thread(u->sink, pa_sink_input_get_max_request(i));
1288
1289 /* FIXME: Too small max_rewind:
1290 * https://bugs.freedesktop.org/show_bug.cgi?id=53709 */
1291 pa_sink_set_max_rewind_within_thread(u->sink, pa_sink_input_get_max_rewind(i));
1292
1293 pa_log_debug("Sink input %d attach", i->index);
1294
1295 u->rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
1296 i->sink->thread_info.rtpoll,
1297 PA_RTPOLL_LATE,
1298 u->asyncmsgq);
1299
1300 pa_sink_attach_within_thread(u->sink);
1301 }
1302
1303
1304 /* Called from source I/O thread context. */
1305 static void source_output_detach_cb(pa_source_output *o) {
1306 struct userdata *u;
1307
1308 pa_source_output_assert_ref(o);
1309 pa_source_output_assert_io_context(o);
1310 pa_assert_se(u = o->userdata);
1311
1312 pa_source_detach_within_thread(u->source);
1313 pa_source_set_rtpoll(u->source, NULL);
1314
1315 pa_log_debug("Source output %d detach", o->index);
1316
1317 if (u->rtpoll_item_read) {
1318 pa_rtpoll_item_free(u->rtpoll_item_read);
1319 u->rtpoll_item_read = NULL;
1320 }
1321 }
1322
1323 /* Called from sink I/O thread context. */
1324 static void sink_input_detach_cb(pa_sink_input *i) {
1325 struct userdata *u;
1326
1327 pa_sink_input_assert_ref(i);
1328 pa_assert_se(u = i->userdata);
1329
1330 pa_sink_detach_within_thread(u->sink);
1331
1332 pa_sink_set_rtpoll(u->sink, NULL);
1333
1334 pa_log_debug("Sink input %d detach", i->index);
1335
1336 if (u->rtpoll_item_write) {
1337 pa_rtpoll_item_free(u->rtpoll_item_write);
1338 u->rtpoll_item_write = NULL;
1339 }
1340 }
1341
1342 /* Called from source I/O thread context. */
1343 static void source_output_state_change_cb(pa_source_output *o, pa_source_output_state_t state) {
1344 struct userdata *u;
1345
1346 pa_source_output_assert_ref(o);
1347 pa_source_output_assert_io_context(o);
1348 pa_assert_se(u = o->userdata);
1349
1350 pa_log_debug("Source output %d state %d", o->index, state);
1351 }
1352
1353 /* Called from sink I/O thread context. */
1354 static void sink_input_state_change_cb(pa_sink_input *i, pa_sink_input_state_t state) {
1355 struct userdata *u;
1356
1357 pa_sink_input_assert_ref(i);
1358 pa_assert_se(u = i->userdata);
1359
1360 pa_log_debug("Sink input %d state %d", i->index, state);
1361
1362 /* If we are added for the first time, ask for a rewinding so that
1363 * we are heard right-away. */
1364 if (PA_SINK_INPUT_IS_LINKED(state) &&
1365 i->thread_info.state == PA_SINK_INPUT_INIT) {
1366 pa_log_debug("Requesting rewind due to state change.");
1367 pa_sink_input_request_rewind(i, 0, FALSE, TRUE, TRUE);
1368 }
1369 }
1370
1371 /* Called from main context. */
1372 static void source_output_kill_cb(pa_source_output *o) {
1373 struct userdata *u;
1374
1375 pa_source_output_assert_ref(o);
1376 pa_assert_ctl_context();
1377 pa_assert_se(u = o->userdata);
1378
1379 u->dead = TRUE;
1380
1381 /* The order here matters! We first kill the source output, followed
1382 * by the source. That means the source callbacks must be protected
1383 * against an unconnected source output! */
1384 pa_source_output_unlink(u->source_output);
1385 pa_source_unlink(u->source);
1386
1387 pa_source_output_unref(u->source_output);
1388 u->source_output = NULL;
1389
1390 pa_source_unref(u->source);
1391 u->source = NULL;
1392
1393 pa_log_debug("Source output kill %d", o->index);
1394
1395 pa_module_unload_request(u->module, TRUE);
1396 }
1397
1398 /* Called from main context */
1399 static void sink_input_kill_cb(pa_sink_input *i) {
1400 struct userdata *u;
1401
1402 pa_sink_input_assert_ref(i);
1403 pa_assert_se(u = i->userdata);
1404
1405 u->dead = TRUE;
1406
1407 /* The order here matters! We first kill the sink input, followed
1408 * by the sink. That means the sink callbacks must be protected
1409 * against an unconnected sink input! */
1410 pa_sink_input_unlink(u->sink_input);
1411 pa_sink_unlink(u->sink);
1412
1413 pa_sink_input_unref(u->sink_input);
1414 u->sink_input = NULL;
1415
1416 pa_sink_unref(u->sink);
1417 u->sink = NULL;
1418
1419 pa_log_debug("Sink input kill %d", i->index);
1420
1421 pa_module_unload_request(u->module, TRUE);
1422 }
1423
1424 /* Called from main context. */
1425 static pa_bool_t source_output_may_move_to_cb(pa_source_output *o, pa_source *dest) {
1426 struct userdata *u;
1427
1428 pa_source_output_assert_ref(o);
1429 pa_assert_ctl_context();
1430 pa_assert_se(u = o->userdata);
1431
1432 if (u->dead || u->autoloaded)
1433 return FALSE;
1434
1435 return (u->source != dest) && (u->sink != dest->monitor_of);
1436 }
1437
1438 /* Called from main context */
1439 static pa_bool_t sink_input_may_move_to_cb(pa_sink_input *i, pa_sink *dest) {
1440 struct userdata *u;
1441
1442 pa_sink_input_assert_ref(i);
1443 pa_assert_se(u = i->userdata);
1444
1445 if (u->dead || u->autoloaded)
1446 return FALSE;
1447
1448 return u->sink != dest;
1449 }
1450
1451 /* Called from main context. */
1452 static void source_output_moving_cb(pa_source_output *o, pa_source *dest) {
1453 struct userdata *u;
1454
1455 pa_source_output_assert_ref(o);
1456 pa_assert_ctl_context();
1457 pa_assert_se(u = o->userdata);
1458
1459 if (dest) {
1460 pa_source_set_asyncmsgq(u->source, dest->asyncmsgq);
1461 pa_source_update_flags(u->source, PA_SOURCE_LATENCY|PA_SOURCE_DYNAMIC_LATENCY, dest->flags);
1462 } else
1463 pa_source_set_asyncmsgq(u->source, NULL);
1464
1465 if (u->source_auto_desc && dest) {
1466 const char *y, *z;
1467 pa_proplist *pl;
1468
1469 pl = pa_proplist_new();
1470 y = pa_proplist_gets(u->sink_input->sink->proplist, PA_PROP_DEVICE_DESCRIPTION);
1471 z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
1472 pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)", z ? z : dest->name,
1473 y ? y : u->sink_input->sink->name);
1474
1475 pa_source_update_proplist(u->source, PA_UPDATE_REPLACE, pl);
1476 pa_proplist_free(pl);
1477 }
1478 }
1479
1480 /* Called from main context */
1481 static void sink_input_moving_cb(pa_sink_input *i, pa_sink *dest) {
1482 struct userdata *u;
1483
1484 pa_sink_input_assert_ref(i);
1485 pa_assert_se(u = i->userdata);
1486
1487 if (dest) {
1488 pa_sink_set_asyncmsgq(u->sink, dest->asyncmsgq);
1489 pa_sink_update_flags(u->sink, PA_SINK_LATENCY|PA_SINK_DYNAMIC_LATENCY, dest->flags);
1490 } else
1491 pa_sink_set_asyncmsgq(u->sink, NULL);
1492
1493 if (u->sink_auto_desc && dest) {
1494 const char *y, *z;
1495 pa_proplist *pl;
1496
1497 pl = pa_proplist_new();
1498 y = pa_proplist_gets(u->source_output->source->proplist, PA_PROP_DEVICE_DESCRIPTION);
1499 z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
1500 pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)", z ? z : dest->name,
1501 y ? y : u->source_output->source->name);
1502
1503 pa_sink_update_proplist(u->sink, PA_UPDATE_REPLACE, pl);
1504 pa_proplist_free(pl);
1505 }
1506 }
1507
1508 /* Called from main context */
1509 static void sink_input_volume_changed_cb(pa_sink_input *i) {
1510 struct userdata *u;
1511
1512 pa_sink_input_assert_ref(i);
1513 pa_assert_se(u = i->userdata);
1514
1515 pa_sink_volume_changed(u->sink, &i->volume);
1516 }
1517
1518 /* Called from main context */
1519 static void sink_input_mute_changed_cb(pa_sink_input *i) {
1520 struct userdata *u;
1521
1522 pa_sink_input_assert_ref(i);
1523 pa_assert_se(u = i->userdata);
1524
1525 pa_sink_mute_changed(u->sink, i->muted);
1526 }
1527
1528 /* Called from main context */
1529 static int canceller_process_msg_cb(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1530 struct pa_echo_canceller_msg *msg;
1531 struct userdata *u;
1532
1533 pa_assert(o);
1534
1535 msg = PA_ECHO_CANCELLER_MSG(o);
1536 u = msg->userdata;
1537
1538 switch (code) {
1539 case ECHO_CANCELLER_MESSAGE_SET_VOLUME: {
1540 pa_cvolume *v = (pa_cvolume *) userdata;
1541
1542 if (u->use_volume_sharing)
1543 pa_source_set_volume(u->source, v, TRUE, FALSE);
1544 else
1545 pa_source_output_set_volume(u->source_output, v, FALSE, TRUE);
1546
1547 break;
1548 }
1549
1550 default:
1551 pa_assert_not_reached();
1552 break;
1553 }
1554
1555 return 0;
1556 }
1557
1558 /* Called by the canceller, so source I/O thread context. */
1559 void pa_echo_canceller_get_capture_volume(pa_echo_canceller *ec, pa_cvolume *v) {
1560 *v = ec->msg->userdata->thread_info.current_volume;
1561 }
1562
1563 /* Called by the canceller, so source I/O thread context. */
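/* The actual volume change has to be applied from the main thread, so we only
 * post a message here instead of touching the source directly. */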
1564 void pa_echo_canceller_set_capture_volume(pa_echo_canceller *ec, pa_cvolume *v) {
1565 if (!pa_cvolume_equal(&ec->msg->userdata->thread_info.current_volume, v)) {
1566 pa_cvolume *vol = pa_xnewdup(pa_cvolume, v, 1);
1567
1568 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(ec->msg), ECHO_CANCELLER_MESSAGE_SET_VOLUME, vol, 0, NULL,
1569 pa_xfree);
1570 }
1571 }
1572
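/* Convert a duration in ms at the given rate into a frame count, rounded down
 * to a power of two. For example, rate=32000 and ms=10 give 320 frames, which
 * is rounded down to 256. */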
1573 uint32_t pa_echo_canceller_blocksize_power2(unsigned rate, unsigned ms) {
1574 unsigned nframes = (rate * ms) / 1000;
1575 uint32_t y = 1 << ((8 * sizeof(uint32_t)) - 2);
1576
1577 assert(rate >= 4000);
1578 assert(ms >= 1);
1579
1580 /* nframes should be a power of 2, round down to nearest power of two */
1581 while (y > nframes)
1582 y >>= 1;
1583
1584 assert(y >= 1);
1585 return y;
1586 }
1587
1588 static pa_echo_canceller_method_t get_ec_method_from_string(const char *method) {
1589 if (pa_streq(method, "null"))
1590 return PA_ECHO_CANCELLER_NULL;
1591 #ifdef HAVE_SPEEX
1592 if (pa_streq(method, "speex"))
1593 return PA_ECHO_CANCELLER_SPEEX;
1594 #endif
1595 #ifdef HAVE_ADRIAN_EC
1596 if (pa_streq(method, "adrian"))
1597 return PA_ECHO_CANCELLER_ADRIAN;
1598 #endif
1599 #ifdef HAVE_WEBRTC
1600 if (pa_streq(method, "webrtc"))
1601 return PA_ECHO_CANCELLER_WEBRTC;
1602 #endif
1603 return PA_ECHO_CANCELLER_INVALID;
1604 }
1605
1606 /* Common initialisation bits between module-echo-cancel and the standalone
1607 * test program.
1608 *
1609 * Called from main context. */
1610 static int init_common(pa_modargs *ma, struct userdata *u, pa_sample_spec *source_ss, pa_channel_map *source_map) {
1611 const char *ec_string;
1612 pa_echo_canceller_method_t ec_method;
1613
1614 if (pa_modargs_get_sample_spec_and_channel_map(ma, source_ss, source_map, PA_CHANNEL_MAP_DEFAULT) < 0) {
1615 pa_log("Invalid sample format specification or channel map");
1616 goto fail;
1617 }
1618
1619 u->ec = pa_xnew0(pa_echo_canceller, 1);
1620 if (!u->ec) {
1621 pa_log("Failed to alloc echo canceller");
1622 goto fail;
1623 }
1624
1625 ec_string = pa_modargs_get_value(ma, "aec_method", DEFAULT_ECHO_CANCELLER);
1626 if ((ec_method = get_ec_method_from_string(ec_string)) < 0) {
1627 pa_log("Invalid echo canceller implementation '%s'", ec_string);
1628 goto fail;
1629 }
1630
1631 pa_log_info("Using AEC engine: %s", ec_string);
1632
1633 u->ec->init = ec_table[ec_method].init;
1634 u->ec->play = ec_table[ec_method].play;
1635 u->ec->record = ec_table[ec_method].record;
1636 u->ec->set_drift = ec_table[ec_method].set_drift;
1637 u->ec->run = ec_table[ec_method].run;
1638 u->ec->done = ec_table[ec_method].done;
1639
1640 return 0;
1641
1642 fail:
1643 return -1;
1644 }
1645
1646 /* Called from main context. */
1647 int pa__init(pa_module*m) {
1648 struct userdata *u;
1649 pa_sample_spec source_ss, sink_ss;
1650 pa_channel_map source_map, sink_map;
1651 pa_modargs *ma;
1652 pa_source *source_master=NULL;
1653 pa_sink *sink_master=NULL;
1654 pa_source_output_new_data source_output_data;
1655 pa_sink_input_new_data sink_input_data;
1656 pa_source_new_data source_data;
1657 pa_sink_new_data sink_data;
1658 pa_memchunk silence;
1659 uint32_t temp;
1660 uint32_t nframes = 0;
1661
1662 pa_assert(m);
1663
1664 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1665 pa_log("Failed to parse module arguments.");
1666 goto fail;
1667 }
1668
1669 if (!(source_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "source_master", NULL), PA_NAMEREG_SOURCE))) {
1670 pa_log("Master source not found");
1671 goto fail;
1672 }
1673 pa_assert(source_master);
1674
1675 if (!(sink_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "sink_master", NULL), PA_NAMEREG_SINK))) {
1676 pa_log("Master sink not found");
1677 goto fail;
1678 }
1679 pa_assert(sink_master);
1680
1681 if (source_master->monitor_of == sink_master) {
1682 pa_log("Can't cancel echo between a sink and its monitor");
1683 goto fail;
1684 }
1685
1686 source_ss = source_master->sample_spec;
1687 source_ss.rate = DEFAULT_RATE;
1688 source_ss.channels = DEFAULT_CHANNELS;
1689 pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT);
1690
1691 sink_ss = sink_master->sample_spec;
1692 sink_map = sink_master->channel_map;
1693
1694 u = pa_xnew0(struct userdata, 1);
1695 if (!u) {
1696 pa_log("Failed to alloc userdata");
1697 goto fail;
1698 }
1699 u->core = m->core;
1700 u->module = m;
1701 m->userdata = u;
1702 u->dead = FALSE;
1703
1704 u->use_volume_sharing = TRUE;
1705 if (pa_modargs_get_value_boolean(ma, "use_volume_sharing", &u->use_volume_sharing) < 0) {
1706 pa_log("use_volume_sharing= expects a boolean argument");
1707 goto fail;
1708 }
1709
1710 temp = DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC;
1711 if (pa_modargs_get_value_u32(ma, "adjust_time", &temp) < 0) {
1712 pa_log("Failed to parse adjust_time value");
1713 goto fail;
1714 }
1715
1716 if (temp != DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC)
1717 u->adjust_time = temp * PA_USEC_PER_SEC;
1718 else
1719 u->adjust_time = DEFAULT_ADJUST_TIME_USEC;
1720
1721 temp = DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC;
1722 if (pa_modargs_get_value_u32(ma, "adjust_threshold", &temp) < 0) {
1723 pa_log("Failed to parse adjust_threshold value");
1724 goto fail;
1725 }
1726
1727 if (temp != DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC)
1728 u->adjust_threshold = temp * PA_USEC_PER_MSEC;
1729 else
1730 u->adjust_threshold = DEFAULT_ADJUST_TOLERANCE;
1731
1732 u->save_aec = DEFAULT_SAVE_AEC;
1733 if (pa_modargs_get_value_boolean(ma, "save_aec", &u->save_aec) < 0) {
1734 pa_log("Failed to parse save_aec value");
1735 goto fail;
1736 }
1737
1738 u->autoloaded = DEFAULT_AUTOLOADED;
1739 if (pa_modargs_get_value_boolean(ma, "autoloaded", &u->autoloaded) < 0) {
1740 pa_log("Failed to parse autoloaded value");
1741 goto fail;
1742 }
1743
1744 if (init_common(ma, u, &source_ss, &source_map) < 0)
1745 goto fail;
1746
1747 u->asyncmsgq = pa_asyncmsgq_new(0);
1748 u->need_realign = TRUE;
1749
1750 pa_assert(u->ec->init);
1751 if (!u->ec->init(u->core, u->ec, &source_ss, &source_map, &sink_ss, &sink_map, &nframes, pa_modargs_get_value(ma, "aec_args", NULL))) {
1752 pa_log("Failed to init AEC engine");
1753 goto fail;
1754 }
1755
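/* The canceller reports its preferred block size in frames via nframes;
 * convert it into bytes for the source and sink sample specs. */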
1756 u->source_blocksize = nframes * pa_frame_size(&source_ss);
1757 u->sink_blocksize = nframes * pa_frame_size(&sink_ss);
1758
1759 if (u->ec->params.drift_compensation)
1760 pa_assert(u->ec->set_drift);
1761
1762 /* Create source */
1763 pa_source_new_data_init(&source_data);
1764 source_data.driver = __FILE__;
1765 source_data.module = m;
1766 if (!(source_data.name = pa_xstrdup(pa_modargs_get_value(ma, "source_name", NULL))))
1767 source_data.name = pa_sprintf_malloc("%s.echo-cancel", source_master->name);
1768 pa_source_new_data_set_sample_spec(&source_data, &source_ss);
1769 pa_source_new_data_set_channel_map(&source_data, &source_map);
1770 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, source_master->name);
1771 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1772 if (!u->autoloaded)
1773 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
1774
1775 if (pa_modargs_get_proplist(ma, "source_properties", source_data.proplist, PA_UPDATE_REPLACE) < 0) {
1776 pa_log("Invalid properties");
1777 pa_source_new_data_done(&source_data);
1778 goto fail;
1779 }
1780
1781 if ((u->source_auto_desc = !pa_proplist_contains(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
1782 const char *y, *z;
1783
1784 y = pa_proplist_gets(sink_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1785 z = pa_proplist_gets(source_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1786 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)",
1787 z ? z : source_master->name, y ? y : sink_master->name);
1788 }
1789
1790 u->source = pa_source_new(m->core, &source_data, (source_master->flags & (PA_SOURCE_LATENCY | PA_SOURCE_DYNAMIC_LATENCY))
1791 | (u->use_volume_sharing ? PA_SOURCE_SHARE_VOLUME_WITH_MASTER : 0));
1792 pa_source_new_data_done(&source_data);
1793
1794 if (!u->source) {
1795 pa_log("Failed to create source.");
1796 goto fail;
1797 }
1798
1799 u->source->parent.process_msg = source_process_msg_cb;
1800 u->source->set_state = source_set_state_cb;
1801 u->source->update_requested_latency = source_update_requested_latency_cb;
1802 pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1803 pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1804 if (!u->use_volume_sharing) {
1805 pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1806 pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1807 pa_source_enable_decibel_volume(u->source, TRUE);
1808 }
1809 u->source->userdata = u;
1810
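/* The filter source has no I/O thread of its own; it is driven from the
 * master source's thread, so it shares the master's asyncmsgq. */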
1811 pa_source_set_asyncmsgq(u->source, source_master->asyncmsgq);
1812
1813 /* Create sink */
1814 pa_sink_new_data_init(&sink_data);
1815 sink_data.driver = __FILE__;
1816 sink_data.module = m;
1817 if (!(sink_data.name = pa_xstrdup(pa_modargs_get_value(ma, "sink_name", NULL))))
1818 sink_data.name = pa_sprintf_malloc("%s.echo-cancel", sink_master->name);
1819 pa_sink_new_data_set_sample_spec(&sink_data, &sink_ss);
1820 pa_sink_new_data_set_channel_map(&sink_data, &sink_map);
1821 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, sink_master->name);
1822 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1823 if (!u->autoloaded)
1824 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
1825
1826 if (pa_modargs_get_proplist(ma, "sink_properties", sink_data.proplist, PA_UPDATE_REPLACE) < 0) {
1827 pa_log("Invalid properties");
1828 pa_sink_new_data_done(&sink_data);
1829 goto fail;
1830 }
1831
1832 if ((u->sink_auto_desc = !pa_proplist_contains(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
1833 const char *y, *z;
1834
1835 y = pa_proplist_gets(source_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1836 z = pa_proplist_gets(sink_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1837 pa_proplist_setf(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)",
1838 z ? z : sink_master->name, y ? y : source_master->name);
1839 }
1840
1841 u->sink = pa_sink_new(m->core, &sink_data, (sink_master->flags & (PA_SINK_LATENCY | PA_SINK_DYNAMIC_LATENCY))
1842 | (u->use_volume_sharing ? PA_SINK_SHARE_VOLUME_WITH_MASTER : 0));
1843 pa_sink_new_data_done(&sink_data);
1844
1845 if (!u->sink) {
1846 pa_log("Failed to create sink.");
1847 goto fail;
1848 }
1849
1850 u->sink->parent.process_msg = sink_process_msg_cb;
1851 u->sink->set_state = sink_set_state_cb;
1852 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1853 u->sink->request_rewind = sink_request_rewind_cb;
1854 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1855 if (!u->use_volume_sharing) {
1856 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1857 pa_sink_enable_decibel_volume(u->sink, TRUE);
1858 }
1859 u->sink->userdata = u;
1860
1861 pa_sink_set_asyncmsgq(u->sink, sink_master->asyncmsgq);
1862
1863 /* Create source output */
1864 pa_source_output_new_data_init(&source_output_data);
1865 source_output_data.driver = __FILE__;
1866 source_output_data.module = m;
1867 pa_source_output_new_data_set_source(&source_output_data, source_master, FALSE);
1868 source_output_data.destination_source = u->source;
1869 /* FIXME
1870 source_output_data.flags = PA_SOURCE_OUTPUT_DONT_INHIBIT_AUTO_SUSPEND; */
1871
1872 pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Source Stream");
1873 pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
1874 pa_source_output_new_data_set_sample_spec(&source_output_data, &source_ss);
1875 pa_source_output_new_data_set_channel_map(&source_output_data, &source_map);
1876
1877 pa_source_output_new(&u->source_output, m->core, &source_output_data);
1878 pa_source_output_new_data_done(&source_output_data);
1879
1880 if (!u->source_output)
1881 goto fail;
1882
1883 u->source_output->parent.process_msg = source_output_process_msg_cb;
1884 u->source_output->push = source_output_push_cb;
1885 u->source_output->process_rewind = source_output_process_rewind_cb;
1886 u->source_output->update_max_rewind = source_output_update_max_rewind_cb;
1887 u->source_output->update_source_requested_latency = source_output_update_source_requested_latency_cb;
1888 u->source_output->update_source_latency_range = source_output_update_source_latency_range_cb;
1889 u->source_output->update_source_fixed_latency = source_output_update_source_fixed_latency_cb;
1890 u->source_output->kill = source_output_kill_cb;
1891 u->source_output->attach = source_output_attach_cb;
1892 u->source_output->detach = source_output_detach_cb;
1893 u->source_output->state_change = source_output_state_change_cb;
1894 u->source_output->may_move_to = source_output_may_move_to_cb;
1895 u->source_output->moving = source_output_moving_cb;
1896 u->source_output->userdata = u;
1897
1898 u->source->output_from_master = u->source_output;
1899
1900 /* Create sink input */
1901 pa_sink_input_new_data_init(&sink_input_data);
1902 sink_input_data.driver = __FILE__;
1903 sink_input_data.module = m;
1904 pa_sink_input_new_data_set_sink(&sink_input_data, sink_master, FALSE);
1905 sink_input_data.origin_sink = u->sink;
1906 pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Sink Stream");
1907 pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
1908 pa_sink_input_new_data_set_sample_spec(&sink_input_data, &sink_ss);
1909 pa_sink_input_new_data_set_channel_map(&sink_input_data, &sink_map);
1910 sink_input_data.flags = PA_SINK_INPUT_VARIABLE_RATE;
1911
1912 pa_sink_input_new(&u->sink_input, m->core, &sink_input_data);
1913 pa_sink_input_new_data_done(&sink_input_data);
1914
1915 if (!u->sink_input)
1916 goto fail;
1917
1918 u->sink_input->parent.process_msg = sink_input_process_msg_cb;
1919 u->sink_input->pop = sink_input_pop_cb;
1920 u->sink_input->process_rewind = sink_input_process_rewind_cb;
1921 u->sink_input->update_max_rewind = sink_input_update_max_rewind_cb;
1922 u->sink_input->update_max_request = sink_input_update_max_request_cb;
1923 u->sink_input->update_sink_requested_latency = sink_input_update_sink_requested_latency_cb;
1924 u->sink_input->update_sink_latency_range = sink_input_update_sink_latency_range_cb;
1925 u->sink_input->update_sink_fixed_latency = sink_input_update_sink_fixed_latency_cb;
1926 u->sink_input->kill = sink_input_kill_cb;
1927 u->sink_input->attach = sink_input_attach_cb;
1928 u->sink_input->detach = sink_input_detach_cb;
1929 u->sink_input->state_change = sink_input_state_change_cb;
1930 u->sink_input->may_move_to = sink_input_may_move_to_cb;
1931 u->sink_input->moving = sink_input_moving_cb;
1932 if (!u->use_volume_sharing)
1933 u->sink_input->volume_changed = sink_input_volume_changed_cb;
1934 u->sink_input->mute_changed = sink_input_mute_changed_cb;
1935 u->sink_input->userdata = u;
1936
1937 u->sink->input_to_master = u->sink_input;
1938
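/* Buffer queues for the captured (source side) and played-back (sink side)
 * signals; holes are filled with the sink input's silence block so the
 * canceller can always be fed aligned blocks. */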
1939 pa_sink_input_get_silence(u->sink_input, &silence);
1940
1941 u->source_memblockq = pa_memblockq_new("module-echo-cancel source_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
1942 &source_ss, 1, 1, 0, &silence);
1943 u->sink_memblockq = pa_memblockq_new("module-echo-cancel sink_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
1944 &sink_ss, 0, 1, 0, &silence);
1945
1946 pa_memblock_unref(silence.memblock);
1947
1948 if (!u->source_memblockq || !u->sink_memblockq) {
1949 pa_log("Failed to create memblockq.");
1950 goto fail;
1951 }
1952
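/* The periodic rate-adjustment timer is only needed when the canceller does
 * not compensate for drift itself; otherwise just request one initial resync. */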
1953 if (u->adjust_time > 0 && !u->ec->params.drift_compensation)
1954 u->time_event = pa_core_rttime_new(m->core, pa_rtclock_now() + u->adjust_time, time_callback, u);
1955 else if (u->ec->params.drift_compensation) {
1956 pa_log_info("Canceller does drift compensation -- built-in compensation will be disabled");
1957 u->adjust_time = 0;
1958 /* Perform resync just once to give the canceller a leg up */
1959 pa_atomic_store(&u->request_resync, 1);
1960 }
1961
1962 if (u->save_aec) {
1963 pa_log("Creating AEC files in /tmp");
1964 u->captured_file = fopen("/tmp/aec_rec.sw", "wb");
1965 if (u->captured_file == NULL)
1966 perror ("Failed to open /tmp/aec_rec.sw");
1967 u->played_file = fopen("/tmp/aec_play.sw", "wb");
1968 if (u->played_file == NULL)
1969 perror ("Failed to open /tmp/aec_play.sw");
1970 u->canceled_file = fopen("/tmp/aec_out.sw", "wb");
1971 if (u->canceled_file == NULL)
1972 perror ("Failed to open /tmp/aec_out.sw");
1973 if (u->ec->params.drift_compensation) {
1974 u->drift_file = fopen("/tmp/aec_drift.txt", "w");
1975 if (u->drift_file == NULL)
1976 perror ("Failed to open /tmp/aec_drift.txt");
1977 }
1978 }
1979
1980 u->ec->msg = pa_msgobject_new(pa_echo_canceller_msg);
1981 u->ec->msg->parent.process_msg = canceller_process_msg_cb;
1982 u->ec->msg->userdata = u;
1983
1984 u->thread_info.current_volume = u->source->reference_volume;
1985
1986 pa_sink_put(u->sink);
1987 pa_source_put(u->source);
1988
1989 pa_sink_input_put(u->sink_input);
1990 pa_source_output_put(u->source_output);
1991 pa_modargs_free(ma);
1992
1993 return 0;
1994
1995 fail:
1996 if (ma)
1997 pa_modargs_free(ma);
1998
1999 pa__done(m);
2000
2001 return -1;
2002 }
2003
2004 /* Called from main context. */
2005 int pa__get_n_used(pa_module *m) {
2006 struct userdata *u;
2007
2008 pa_assert(m);
2009 pa_assert_se(u = m->userdata);
2010
2011 return pa_sink_linked_by(u->sink) + pa_source_linked_by(u->source);
2012 }
2013
2014 /* Called from main context. */
2015 void pa__done(pa_module*m) {
2016 struct userdata *u;
2017
2018 pa_assert(m);
2019
2020 if (!(u = m->userdata))
2021 return;
2022
2023 u->dead = TRUE;
2024
2025 /* See comments in source_output_kill_cb() above regarding
2026 * destruction order! */
2027
2028 if (u->time_event)
2029 u->core->mainloop->time_free(u->time_event);
2030
2031 if (u->source_output)
2032 pa_source_output_unlink(u->source_output);
2033 if (u->sink_input)
2034 pa_sink_input_unlink(u->sink_input);
2035
2036 if (u->source)
2037 pa_source_unlink(u->source);
2038 if (u->sink)
2039 pa_sink_unlink(u->sink);
2040
2041 if (u->source_output)
2042 pa_source_output_unref(u->source_output);
2043 if (u->sink_input)
2044 pa_sink_input_unref(u->sink_input);
2045
2046 if (u->source)
2047 pa_source_unref(u->source);
2048 if (u->sink)
2049 pa_sink_unref(u->sink);
2050
2051 if (u->source_memblockq)
2052 pa_memblockq_free(u->source_memblockq);
2053 if (u->sink_memblockq)
2054 pa_memblockq_free(u->sink_memblockq);
2055
2056 if (u->ec) {
2057 if (u->ec->done)
2058 u->ec->done(u->ec);
2059
2060 pa_xfree(u->ec);
2061 }
2062
2063 if (u->asyncmsgq)
2064 pa_asyncmsgq_unref(u->asyncmsgq);
2065
2066 if (u->save_aec) {
2067 if (u->played_file)
2068 fclose(u->played_file);
2069 if (u->captured_file)
2070 fclose(u->captured_file);
2071 if (u->canceled_file)
2072 fclose(u->canceled_file);
2073 if (u->drift_file)
2074 fclose(u->drift_file);
2075 }
2076
2077 pa_xfree(u);
2078 }
2079
2080 #ifdef ECHO_CANCEL_TEST
2081 /*
2082 * Stand-alone test program for running the canceller on pre-recorded files.
2083 */
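/*
 * An invocation might look like the following (binary and file names are
 * purely illustrative); the .sw files are raw signed 16-bit samples, and the
 * optional last argument is a drift event file in the format parsed below:
 *
 *     ./echo-cancel-test played.sw captured.sw cancelled.sw "aec_method=speex" drift.txt
 */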
2084 int main(int argc, char* argv[]) {
2085 struct userdata u;
2086 pa_sample_spec source_ss, sink_ss;
2087 pa_channel_map source_map, sink_map;
2088 pa_modargs *ma = NULL;
2089 uint8_t *rdata = NULL, *pdata = NULL, *cdata = NULL;
2090 int unused PA_GCC_UNUSED;
2091 int ret = 0, i;
2092 char c;
2093 float drift;
2094 uint32_t nframes;
2095
2096 if (!getenv("MAKE_CHECK"))
2097 pa_log_set_level(PA_LOG_DEBUG);
2098
2099 pa_memzero(&u, sizeof(u));
2100
2101 if (argc < 4 || argc > 7) {
2102 goto usage;
2103 }
2104
2105 u.captured_file = fopen(argv[2], "rb");
2106 if (u.captured_file == NULL) {
2107 perror ("Could not open capture file");
2108 goto fail;
2109 }
2110 u.played_file = fopen(argv[1], "rb");
2111 if (u.played_file == NULL) {
2112 perror ("Could not open play file");
2113 goto fail;
2114 }
2115 u.canceled_file = fopen(argv[3], "wb");
2116 if (u.canceled_file == NULL) {
2117 perror ("Could not open canceled file");
2118 goto fail;
2119 }
2120
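/* Set up a minimal pa_core for the test: only the CPU feature flags are
 * filled in, and they are hard-coded here to x86 with SSE. */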
2121 u.core = pa_xnew0(pa_core, 1);
2122 u.core->cpu_info.cpu_type = PA_CPU_X86;
2123 u.core->cpu_info.flags.x86 |= PA_CPU_X86_SSE;
2124
2125 if (!(ma = pa_modargs_new(argc > 4 ? argv[4] : NULL, valid_modargs))) {
2126 pa_log("Failed to parse module arguments.");
2127 goto fail;
2128 }
2129
2130 source_ss.format = PA_SAMPLE_S16LE;
2131 source_ss.rate = DEFAULT_RATE;
2132 source_ss.channels = DEFAULT_CHANNELS;
2133 pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT);
2134
2135 sink_ss.format = PA_SAMPLE_S16LE;
2136 sink_ss.rate = DEFAULT_RATE;
2137 sink_ss.channels = DEFAULT_CHANNELS;
2138 pa_channel_map_init_auto(&sink_map, sink_ss.channels, PA_CHANNEL_MAP_DEFAULT);
2139
2140 if (init_common(ma, &u, &source_ss, &source_map) < 0)
2141 goto fail;
2142
2143 if (!u.ec->init(u.core, u.ec, &source_ss, &source_map, &sink_ss, &sink_map, &nframes,
2144 pa_modargs_get_value(ma, "aec_args", NULL))) {
2145 pa_log("Failed to init AEC engine");
2146 goto fail;
2147 }
2148 u.source_blocksize = nframes * pa_frame_size(&source_ss);
2149 u.sink_blocksize = nframes * pa_frame_size(&sink_ss);
2150
2151 if (u.ec->params.drift_compensation) {
2152 if (argc < 6) {
2153 pa_log("Drift compensation enabled but drift file not specified");
2154 goto fail;
2155 }
2156
2157 u.drift_file = fopen(argv[5], "rt");
2158
2159 if (u.drift_file == NULL) {
2160 perror ("Could not open drift file");
2161 goto fail;
2162 }
2163 }
2164
2165 rdata = pa_xmalloc(u.source_blocksize);
2166 pdata = pa_xmalloc(u.sink_blocksize);
2167 cdata = pa_xmalloc(u.source_blocksize);
2168
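/* Without drift compensation, both files are processed in lock step, one
 * canceller block at a time. With drift compensation, the drift file drives
 * the sequence of drift ('d'), record ('c') and playback ('p') events. */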
2169 if (!u.ec->params.drift_compensation) {
2170 while (fread(rdata, u.source_blocksize, 1, u.captured_file) > 0) {
2171 if (fread(pdata, u.sink_blocksize, 1, u.played_file) == 0) {
2172 perror("Played file ended before captured file");
2173 goto fail;
2174 }
2175
2176 u.ec->run(u.ec, rdata, pdata, cdata);
2177
2178 unused = fwrite(cdata, u.source_blocksize, 1, u.canceled_file);
2179 }
2180 } else {
2181 while (fscanf(u.drift_file, "%c", &c) > 0) {
2182 switch (c) {
2183 case 'd':
2184 if (fscanf(u.drift_file, "%a", &drift) != 1) {
2185 perror("Drift file incomplete");
2186 goto fail;
2187 }
2188
2189 u.ec->set_drift(u.ec, drift);
2190
2191 break;
2192
2193 case 'c':
2194 if (fscanf(u.drift_file, "%d", &i) != 1) {
2195 perror("Drift file incomplete");
2196 goto fail;
2197 }
2198
2199 if (fread(rdata, i, 1, u.captured_file) <= 0) {
2200 perror("Captured file ended prematurely");
2201 goto fail;
2202 }
2203
2204 u.ec->record(u.ec, rdata, cdata);
2205
2206 unused = fwrite(cdata, i, 1, u.canceled_file);
2207
2208 break;
2209
2210 case 'p':
2211 if (fscanf(u.drift_file, "%d", &i) != 1) {
2212 perror("Drift file incomplete");
2213 goto fail;
2214 }
2215
2216 if (fread(pdata, i, 1, u.played_file) <= 0) {
2217 perror("Played file ended prematurely");
2218 goto fail;
2219 }
2220
2221 u.ec->play(u.ec, pdata);
2222
2223 break;
2224 }
2225 }
2226
2227 if (fread(rdata, 1, 1, u.captured_file) > 0)
2228 pa_log("Not all of the capture data was consumed");
2229 if (fread(pdata, 1, 1, u.played_file) > 0)
2230 pa_log("Not all of the playback data was consumed");
2231 }
2232
2233 u.ec->done(u.ec);
2234
2235 out:
2236 if (u.captured_file)
2237 fclose(u.captured_file);
2238 if (u.played_file)
2239 fclose(u.played_file);
2240 if (u.canceled_file)
2241 fclose(u.canceled_file);
2242 if (u.drift_file)
2243 fclose(u.drift_file);
2244
2245 pa_xfree(rdata);
2246 pa_xfree(pdata);
2247 pa_xfree(cdata);
2248
2249 pa_xfree(u.ec);
2250 pa_xfree(u.core);
2251
2252 if (ma)
2253 pa_modargs_free(ma);
2254
2255 return ret;
2256
2257 usage:
2258 pa_log("Usage: %s play_file rec_file out_file [module args] [drift_file]", argv[0]);
2259
2260 fail:
2261 ret = -1;
2262 goto out;
2263 }
2264 #endif /* ECHO_CANCEL_TEST */