1 /***
2 This file is part of PulseAudio.
3
4 Copyright 2010 Wim Taymans <wim.taymans@gmail.com>
5
6 Based on module-virtual-sink.c
7 module-virtual-source.c
8 module-loopback.c
9
10 Copyright 2010 Intel Corporation
11 Contributor: Pierre-Louis Bossart <pierre-louis.bossart@intel.com>
12
13 PulseAudio is free software; you can redistribute it and/or modify
14 it under the terms of the GNU Lesser General Public License as published
15 by the Free Software Foundation; either version 2.1 of the License,
16 or (at your option) any later version.
17
18 PulseAudio is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU Lesser General Public License
24 along with PulseAudio; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
26 USA.
27 ***/
28
29 #ifdef HAVE_CONFIG_H
30 #include <config.h>
31 #endif
32
33 #include <stdio.h>
34 #include <math.h>
35
36 #include "echo-cancel.h"
37
38 #include <pulse/xmalloc.h>
39 #include <pulse/timeval.h>
40 #include <pulse/rtclock.h>
41
42 #include <pulsecore/i18n.h>
43 #include <pulsecore/atomic.h>
44 #include <pulsecore/macro.h>
45 #include <pulsecore/namereg.h>
46 #include <pulsecore/sink.h>
47 #include <pulsecore/module.h>
48 #include <pulsecore/core-rtclock.h>
49 #include <pulsecore/core-util.h>
50 #include <pulsecore/modargs.h>
51 #include <pulsecore/log.h>
52 #include <pulsecore/rtpoll.h>
53 #include <pulsecore/sample-util.h>
54 #include <pulsecore/ltdl-helper.h>
55
56 #include "module-echo-cancel-symdef.h"
57
58 PA_MODULE_AUTHOR("Wim Taymans");
59 PA_MODULE_DESCRIPTION("Echo Cancellation");
60 PA_MODULE_VERSION(PACKAGE_VERSION);
61 PA_MODULE_LOAD_ONCE(false);
62 PA_MODULE_USAGE(
63 _("source_name=<name for the source> "
64 "source_properties=<properties for the source> "
65 "source_master=<name of source to filter> "
66 "sink_name=<name for the sink> "
67 "sink_properties=<properties for the sink> "
68 "sink_master=<name of sink to filter> "
69 "adjust_time=<how often to readjust rates in s> "
70 "adjust_threshold=<how much drift to readjust after in ms> "
71 "format=<sample format> "
72 "rate=<sample rate> "
73 "channels=<number of channels> "
74 "channel_map=<channel map> "
75 "aec_method=<implementation to use> "
76 "aec_args=<parameters for the AEC engine> "
77 "save_aec=<save AEC data in /tmp> "
78 "autoloaded=<set if this module is being loaded automatically> "
79 "use_volume_sharing=<yes or no> "
80 ));
81
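/* Example invocation (an illustrative sketch only; the device names below are
 * placeholders, not real devices):
 *
 *   load-module module-echo-cancel aec_method=webrtc \
 *       source_master=alsa_input.usb-mic.analog-stereo \
 *       sink_master=alsa_output.pci-0000_00_1b.0.analog-stereo \
 *       source_name=ec_source sink_name=ec_sink
 *
 * Clients then capture from ec_source and play to ec_sink to get the
 * echo-cancelled path. */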
82 /* NOTE: Make sure the enum and ec_table are maintained in the correct order */
83 typedef enum {
84 PA_ECHO_CANCELLER_INVALID = -1,
85 PA_ECHO_CANCELLER_NULL,
86 #ifdef HAVE_SPEEX
87 PA_ECHO_CANCELLER_SPEEX,
88 #endif
89 #ifdef HAVE_ADRIAN_EC
90 PA_ECHO_CANCELLER_ADRIAN,
91 #endif
92 #ifdef HAVE_WEBRTC
93 PA_ECHO_CANCELLER_WEBRTC,
94 #endif
95 } pa_echo_canceller_method_t;
96
97 #ifdef HAVE_WEBRTC
98 #define DEFAULT_ECHO_CANCELLER "webrtc"
99 #else
100 #define DEFAULT_ECHO_CANCELLER "speex"
101 #endif
102
103 static const pa_echo_canceller ec_table[] = {
104 {
105 /* Null, Dummy echo canceller (just copies data) */
106 .init = pa_null_ec_init,
107 .run = pa_null_ec_run,
108 .done = pa_null_ec_done,
109 },
110 #ifdef HAVE_SPEEX
111 {
112 /* Speex */
113 .init = pa_speex_ec_init,
114 .run = pa_speex_ec_run,
115 .done = pa_speex_ec_done,
116 },
117 #endif
118 #ifdef HAVE_ADRIAN_EC
119 {
120 /* Adrian Andre's NLMS implementation */
121 .init = pa_adrian_ec_init,
122 .run = pa_adrian_ec_run,
123 .done = pa_adrian_ec_done,
124 },
125 #endif
126 #ifdef HAVE_WEBRTC
127 {
128 /* WebRTC's audio processing engine */
129 .init = pa_webrtc_ec_init,
130 .play = pa_webrtc_ec_play,
131 .record = pa_webrtc_ec_record,
132 .set_drift = pa_webrtc_ec_set_drift,
133 .run = pa_webrtc_ec_run,
134 .done = pa_webrtc_ec_done,
135 },
136 #endif
137 };
138
139 #define DEFAULT_RATE 32000
140 #define DEFAULT_CHANNELS 1
141 #define DEFAULT_ADJUST_TIME_USEC (1*PA_USEC_PER_SEC)
142 #define DEFAULT_ADJUST_TOLERANCE (5*PA_USEC_PER_MSEC)
143 #define DEFAULT_SAVE_AEC false
144 #define DEFAULT_AUTOLOADED false
145
146 #define MEMBLOCKQ_MAXLENGTH (16*1024*1024)
147
148 /* Can only be used in main context */
149 #define IS_ACTIVE(u) ((pa_source_get_state((u)->source) == PA_SOURCE_RUNNING) && \
150 (pa_sink_get_state((u)->sink) == PA_SINK_RUNNING))
151
152 /* This module creates a new (virtual) source and sink.
153 *
154 * The data sent to the new sink is kept in a memblockq before being
155 * forwarded to the real sink_master.
156 *
157 * Data read from source_master is matched against the saved sink data and
158 * echo-cancelled data is then pushed onto the new source.
159 *
160 * Both source and sink masters have their own threads to push/pull data
161 * respectively. However, we perform all our actions in the source I/O thread.
162 * To do this we send all played samples to the source I/O thread, where they
163 * are then pushed into the memblockq.
164 *
165 * Alignment is performed in two steps:
166 *
167 * 1) When something happens that requires a quick adjustment of the alignment
168 * of capture and playback samples, we perform a resync. This adjusts the
169 * position in the playback memblock to the requested sample. Quick
170 * adjustments include moving the playback samples before the capture
171 * samples (because otherwise the echo canceller does not work) or when the
172 * playback pointer drifts too far away.
173 *
174 * 2) Periodically check the difference between capture and playback. We use
175 * low and high watermarks for adjusting the alignment. Playback should
176 * always be before capture, and the difference should not be bigger than
177 * one frame size. We would ideally like to resample the sink_input, but
178 * most drivers don't give enough accuracy to be able to do that right now.
179 */
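/* A rough sketch (not part of the module) of how the two alignment steps play
 * out in the code below: calc_diff() turns a latency snapshot into a signed
 * drift, and time_callback()/do_resync() act on it.
 *
 *   diff_time = (sink_now + sink_latency - buffer_latency)
 *             - (source_now - source_latency);
 *
 *   if (diff_time < 0)                     -> capture ahead of playback:
 *                                             resync immediately (step 1)
 *   else if (diff_time > adjust_threshold) -> drifted too far: resync (step 1)
 *   else                                   -> leave things alone; rate matching
 *                                             (step 2) is currently disabled and
 *                                             equal sample rates are assumed. */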
180
181 struct userdata;
182
183 struct pa_echo_canceller_msg {
184 pa_msgobject parent;
185 struct userdata *userdata;
186 };
187
188 PA_DEFINE_PRIVATE_CLASS(pa_echo_canceller_msg, pa_msgobject);
189 #define PA_ECHO_CANCELLER_MSG(o) (pa_echo_canceller_msg_cast(o))
190
191 struct snapshot {
192 pa_usec_t sink_now;
193 pa_usec_t sink_latency;
194 size_t sink_delay;
195 int64_t send_counter;
196
197 pa_usec_t source_now;
198 pa_usec_t source_latency;
199 size_t source_delay;
200 int64_t recv_counter;
201 size_t rlen;
202 size_t plen;
203 };
204
205 struct userdata {
206 pa_core *core;
207 pa_module *module;
208
209 bool autoloaded;
210 bool dead;
211 bool save_aec;
212
213 pa_echo_canceller *ec;
214 uint32_t source_output_blocksize;
215 uint32_t source_blocksize;
216 uint32_t sink_blocksize;
217
218 bool need_realign;
219
220 /* to wakeup the source I/O thread */
221 pa_asyncmsgq *asyncmsgq;
222 pa_rtpoll_item *rtpoll_item_read, *rtpoll_item_write;
223
224 pa_source *source;
225 bool source_auto_desc;
226 pa_source_output *source_output;
227 pa_memblockq *source_memblockq; /* echo canceler needs fixed sized chunks */
228 size_t source_skip;
229
230 pa_sink *sink;
231 bool sink_auto_desc;
232 pa_sink_input *sink_input;
233 pa_memblockq *sink_memblockq;
234 int64_t send_counter; /* updated in sink IO thread */
235 int64_t recv_counter;
236 size_t sink_skip;
237
238 /* Bytes left over from previous iteration */
239 size_t sink_rem;
240 size_t source_rem;
241
242 pa_atomic_t request_resync;
243
244 pa_time_event *time_event;
245 pa_usec_t adjust_time;
246 int adjust_threshold;
247
248 FILE *captured_file;
249 FILE *played_file;
250 FILE *canceled_file;
251 FILE *drift_file;
252
253 bool use_volume_sharing;
254
255 struct {
256 pa_cvolume current_volume;
257 } thread_info;
258 };
259
260 static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot);
261
262 static const char* const valid_modargs[] = {
263 "source_name",
264 "source_properties",
265 "source_master",
266 "sink_name",
267 "sink_properties",
268 "sink_master",
269 "adjust_time",
270 "adjust_threshold",
271 "format",
272 "rate",
273 "channels",
274 "channel_map",
275 "aec_method",
276 "aec_args",
277 "save_aec",
278 "autoloaded",
279 "use_volume_sharing",
280 NULL
281 };
282
283 enum {
284 SOURCE_OUTPUT_MESSAGE_POST = PA_SOURCE_OUTPUT_MESSAGE_MAX,
285 SOURCE_OUTPUT_MESSAGE_REWIND,
286 SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT,
287 SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME
288 };
289
290 enum {
291 SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT
292 };
293
294 enum {
295 ECHO_CANCELLER_MESSAGE_SET_VOLUME,
296 };
297
298 static int64_t calc_diff(struct userdata *u, struct snapshot *snapshot) {
299 int64_t diff_time, buffer_latency;
300 pa_usec_t plen, rlen, source_delay, sink_delay, recv_counter, send_counter;
301
302 /* get latency difference between playback and record */
303 plen = pa_bytes_to_usec(snapshot->plen, &u->sink_input->sample_spec);
304 rlen = pa_bytes_to_usec(snapshot->rlen, &u->source_output->sample_spec);
305 if (plen > rlen)
306 buffer_latency = plen - rlen;
307 else
308 buffer_latency = 0;
309
310 source_delay = pa_bytes_to_usec(snapshot->source_delay, &u->source_output->sample_spec);
311 sink_delay = pa_bytes_to_usec(snapshot->sink_delay, &u->sink_input->sample_spec);
312 buffer_latency += source_delay + sink_delay;
313
314 /* add the latency difference due to samples not yet transferred */
315 send_counter = pa_bytes_to_usec(snapshot->send_counter, &u->sink->sample_spec);
316 recv_counter = pa_bytes_to_usec(snapshot->recv_counter, &u->sink->sample_spec);
317 if (recv_counter <= send_counter)
318 buffer_latency += (int64_t) (send_counter - recv_counter);
319 else
320 buffer_latency = PA_CLIP_SUB(buffer_latency, (int64_t) (recv_counter - send_counter));
321
322 /* capture and playback are perfectly aligned when diff_time is 0 */
323 diff_time = (snapshot->sink_now + snapshot->sink_latency - buffer_latency) -
324 (snapshot->source_now - snapshot->source_latency);
325
326 pa_log_debug("Diff %lld (%lld - %lld + %lld) %lld %lld %lld %lld", (long long) diff_time,
327 (long long) snapshot->sink_latency,
328 (long long) buffer_latency, (long long) snapshot->source_latency,
329 (long long) source_delay, (long long) sink_delay,
330 (long long) (send_counter - recv_counter),
331 (long long) (snapshot->sink_now - snapshot->source_now));
332
333 return diff_time;
334 }
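/* Worked example for calc_diff() (illustrative numbers only): with 20 ms of
 * queued playback (plen), 15 ms of queued capture (rlen), source_delay = 2 ms,
 * sink_delay = 3 ms and send_counter - recv_counter = 1 ms, buffer_latency is
 * (20 - 15) + 2 + 3 + 1 = 11 ms. If the snapshots were taken at the same time,
 * sink_latency = 25 ms and source_latency = 10 ms, then
 * diff_time = (25 - 11) - (-10) = 24 ms, i.e. playback leads capture by 24 ms
 * and a resync is requested once that exceeds adjust_threshold. */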
335
336 /* Called from main context */
337 static void time_callback(pa_mainloop_api *a, pa_time_event *e, const struct timeval *t, void *userdata) {
338 struct userdata *u = userdata;
339 uint32_t old_rate, base_rate, new_rate;
340 int64_t diff_time;
341 /*size_t fs*/
342 struct snapshot latency_snapshot;
343
344 pa_assert(u);
345 pa_assert(a);
346 pa_assert(u->time_event == e);
347 pa_assert_ctl_context();
348
349 if (!IS_ACTIVE(u))
350 return;
351
352 /* update our snapshots */
353 pa_asyncmsgq_send(u->source_output->source->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
354 pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
355
356 /* calculate drift between capture and playback */
357 diff_time = calc_diff(u, &latency_snapshot);
358
359 /*fs = pa_frame_size(&u->source_output->sample_spec);*/
360 old_rate = u->sink_input->sample_spec.rate;
361 base_rate = u->source_output->sample_spec.rate;
362
363 if (diff_time < 0) {
364 /* recording before playback, we need to adjust quickly. The echo
365 * canceler does not work in this case. */
366 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
367 NULL, diff_time, NULL, NULL);
368 /*new_rate = base_rate - ((pa_usec_to_bytes(-diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/
369 new_rate = base_rate;
370 }
371 else {
372 if (diff_time > u->adjust_threshold) {
373 /* diff too big, quickly adjust */
374 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME,
375 NULL, diff_time, NULL, NULL);
376 }
377
378 /* recording behind playback, we need to slowly adjust the rate to match */
379 /*new_rate = base_rate + ((pa_usec_to_bytes(diff_time, &u->source_output->sample_spec) / fs) * PA_USEC_PER_SEC) / u->adjust_time;*/
380
381 /* assume equal samplerates for now */
382 new_rate = base_rate;
383 }
384
385 /* make sure we don't make too big adjustments because that sounds horrible */
386 if (new_rate > base_rate * 1.1 || new_rate < base_rate * 0.9)
387 new_rate = base_rate;
388
389 if (new_rate != old_rate) {
390 pa_log_info("Old rate %lu Hz, new rate %lu Hz", (unsigned long) old_rate, (unsigned long) new_rate);
391
392 pa_sink_input_set_rate(u->sink_input, new_rate);
393 }
394
395 pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
396 }
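/* The commented-out rate-matching formula in time_callback() above would work
 * roughly like this (illustrative numbers, not active code): at
 * base_rate = 32000 Hz with adjust_time = 1 s and a measured lag of
 * diff_time = 5 ms, the capture side is 0.005 * 32000 = 160 frames behind, so
 *
 *   new_rate = 32000 + (160 * PA_USEC_PER_SEC) / adjust_time = 32160 Hz,
 *
 * which is well inside the +/- 10 % clamp applied before calling
 * pa_sink_input_set_rate(). For now new_rate is simply kept at base_rate. */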
397
398 /* Called from source I/O thread context */
399 static int source_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
400 struct userdata *u = PA_SOURCE(o)->userdata;
401
402 switch (code) {
403
404 case PA_SOURCE_MESSAGE_GET_LATENCY:
405
406 /* The source is _put() before the source output is, so let's
407 * make sure we don't access it in that time. Also, the
408 * source output is first shut down, the source second. */
409 if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
410 !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state)) {
411 *((pa_usec_t*) data) = 0;
412 return 0;
413 }
414
415 *((pa_usec_t*) data) =
416
417 /* Get the latency of the master source */
418 pa_source_get_latency_within_thread(u->source_output->source) +
419 /* Add the latency internal to our source output on top */
420 pa_bytes_to_usec(pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq), &u->source_output->source->sample_spec) +
421 /* and the buffering we do on the source */
422 pa_bytes_to_usec(u->source_output_blocksize, &u->source_output->source->sample_spec);
423
424 return 0;
425
426 case PA_SOURCE_MESSAGE_SET_VOLUME_SYNCED:
427 u->thread_info.current_volume = u->source->reference_volume;
428 break;
429 }
430
431 return pa_source_process_msg(o, code, data, offset, chunk);
432 }
433
434 /* Called from sink I/O thread context */
435 static int sink_process_msg_cb(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
436 struct userdata *u = PA_SINK(o)->userdata;
437
438 switch (code) {
439
440 case PA_SINK_MESSAGE_GET_LATENCY:
441
442 /* The sink is _put() before the sink input is, so let's
443 * make sure we don't access it in that time. Also, the
444 * sink input is first shut down, the sink second. */
445 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
446 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state)) {
447 *((pa_usec_t*) data) = 0;
448 return 0;
449 }
450
451 *((pa_usec_t*) data) =
452
453 /* Get the latency of the master sink */
454 pa_sink_get_latency_within_thread(u->sink_input->sink) +
455
456 /* Add the latency internal to our sink input on top */
457 pa_bytes_to_usec(pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq), &u->sink_input->sink->sample_spec);
458
459 return 0;
460 }
461
462 return pa_sink_process_msg(o, code, data, offset, chunk);
463 }
464
465 /* Called from main context */
466 static int source_set_state_cb(pa_source *s, pa_source_state_t state) {
467 struct userdata *u;
468
469 pa_source_assert_ref(s);
470 pa_assert_se(u = s->userdata);
471
472 if (!PA_SOURCE_IS_LINKED(state) ||
473 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
474 return 0;
475
476 if (state == PA_SOURCE_RUNNING) {
477 /* restart timer when both sink and source are active */
478 if (IS_ACTIVE(u) && u->adjust_time)
479 pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
480
481 pa_atomic_store(&u->request_resync, 1);
482 pa_source_output_cork(u->source_output, false);
483 } else if (state == PA_SOURCE_SUSPENDED) {
484 pa_source_output_cork(u->source_output, true);
485 }
486
487 return 0;
488 }
489
490 /* Called from main context */
491 static int sink_set_state_cb(pa_sink *s, pa_sink_state_t state) {
492 struct userdata *u;
493
494 pa_sink_assert_ref(s);
495 pa_assert_se(u = s->userdata);
496
497 if (!PA_SINK_IS_LINKED(state) ||
498 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
499 return 0;
500
501 if (state == PA_SINK_RUNNING) {
502 /* restart timer when both sink and source are active */
503 if (IS_ACTIVE(u) && u->adjust_time)
504 pa_core_rttime_restart(u->core, u->time_event, pa_rtclock_now() + u->adjust_time);
505
506 pa_atomic_store(&u->request_resync, 1);
507 pa_sink_input_cork(u->sink_input, false);
508 } else if (state == PA_SINK_SUSPENDED) {
509 pa_sink_input_cork(u->sink_input, true);
510 }
511
512 return 0;
513 }
514
515 /* Called from source I/O thread context */
516 static void source_update_requested_latency_cb(pa_source *s) {
517 struct userdata *u;
518
519 pa_source_assert_ref(s);
520 pa_assert_se(u = s->userdata);
521
522 if (!PA_SOURCE_IS_LINKED(u->source->thread_info.state) ||
523 !PA_SOURCE_OUTPUT_IS_LINKED(u->source_output->thread_info.state))
524 return;
525
526 pa_log_debug("Source update requested latency");
527
528 /* Just hand this one over to the master source */
529 pa_source_output_set_requested_latency_within_thread(
530 u->source_output,
531 pa_source_get_requested_latency_within_thread(s));
532 }
533
534 /* Called from sink I/O thread context */
535 static void sink_update_requested_latency_cb(pa_sink *s) {
536 struct userdata *u;
537
538 pa_sink_assert_ref(s);
539 pa_assert_se(u = s->userdata);
540
541 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
542 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
543 return;
544
545 pa_log_debug("Sink update requested latency");
546
547 /* Just hand this one over to the master sink */
548 pa_sink_input_set_requested_latency_within_thread(
549 u->sink_input,
550 pa_sink_get_requested_latency_within_thread(s));
551 }
552
553 /* Called from sink I/O thread context */
554 static void sink_request_rewind_cb(pa_sink *s) {
555 struct userdata *u;
556
557 pa_sink_assert_ref(s);
558 pa_assert_se(u = s->userdata);
559
560 if (!PA_SINK_IS_LINKED(u->sink->thread_info.state) ||
561 !PA_SINK_INPUT_IS_LINKED(u->sink_input->thread_info.state))
562 return;
563
564 pa_log_debug("Sink request rewind %lld", (long long) s->thread_info.rewind_nbytes);
565
566 /* Just hand this one over to the master sink */
567 pa_sink_input_request_rewind(u->sink_input,
568 s->thread_info.rewind_nbytes, true, false, false);
569 }
570
571 /* Called from main context */
572 static void source_set_volume_cb(pa_source *s) {
573 struct userdata *u;
574
575 pa_source_assert_ref(s);
576 pa_assert_se(u = s->userdata);
577
578 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
579 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
580 return;
581
582 pa_source_output_set_volume(u->source_output, &s->real_volume, s->save_volume, true);
583 }
584
585 /* Called from main context */
586 static void sink_set_volume_cb(pa_sink *s) {
587 struct userdata *u;
588
589 pa_sink_assert_ref(s);
590 pa_assert_se(u = s->userdata);
591
592 if (!PA_SINK_IS_LINKED(pa_sink_get_state(s)) ||
593 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
594 return;
595
596 pa_sink_input_set_volume(u->sink_input, &s->real_volume, s->save_volume, true);
597 }
598
599 /* Called from main context. */
600 static void source_get_volume_cb(pa_source *s) {
601 struct userdata *u;
602 pa_cvolume v;
603
604 pa_source_assert_ref(s);
605 pa_assert_se(u = s->userdata);
606
607 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
608 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
609 return;
610
611 pa_source_output_get_volume(u->source_output, &v, true);
612
613 if (pa_cvolume_equal(&s->real_volume, &v))
614 /* no change */
615 return;
616
617 s->real_volume = v;
618 pa_source_set_soft_volume(s, NULL);
619 }
620
621 /* Called from main context */
622 static void source_set_mute_cb(pa_source *s) {
623 struct userdata *u;
624
625 pa_source_assert_ref(s);
626 pa_assert_se(u = s->userdata);
627
628 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
629 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
630 return;
631
632 pa_source_output_set_mute(u->source_output, s->muted, s->save_muted);
633 }
634
635 /* Called from main context */
636 static void sink_set_mute_cb(pa_sink *s) {
637 struct userdata *u;
638
639 pa_sink_assert_ref(s);
640 pa_assert_se(u = s->userdata);
641
642 if (!PA_SINK_IS_LINKED(pa_sink_get_state(s)) ||
643 !PA_SINK_INPUT_IS_LINKED(pa_sink_input_get_state(u->sink_input)))
644 return;
645
646 pa_sink_input_set_mute(u->sink_input, s->muted, s->save_muted);
647 }
648
649 /* Called from main context */
650 static void source_get_mute_cb(pa_source *s) {
651 struct userdata *u;
652
653 pa_source_assert_ref(s);
654 pa_assert_se(u = s->userdata);
655
656 if (!PA_SOURCE_IS_LINKED(pa_source_get_state(s)) ||
657 !PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output)))
658 return;
659
660 pa_source_output_get_mute(u->source_output);
661 }
662
663 /* Called from source I/O thread context. */
664 static void apply_diff_time(struct userdata *u, int64_t diff_time) {
665 int64_t diff;
666
667 if (diff_time < 0) {
668 diff = pa_usec_to_bytes(-diff_time, &u->sink_input->sample_spec);
669
670 if (diff > 0) {
671 /* add some extra safety samples to compensate for jitter in the
672 * timings */
673 diff += 10 * pa_frame_size (&u->sink_input->sample_spec);
674
675 pa_log("Playback after capture (%lld), drop sink %lld", (long long) diff_time, (long long) diff);
676
677 u->sink_skip = diff;
678 u->source_skip = 0;
679 }
680 } else if (diff_time > 0) {
681 diff = pa_usec_to_bytes(diff_time, &u->source_output->sample_spec);
682
683 if (diff > 0) {
684 pa_log("Playback too far ahead (%lld), drop source %lld", (long long) diff_time, (long long) diff);
685
686 u->source_skip = diff;
687 u->sink_skip = 0;
688 }
689 }
690 }
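/* Worked example for apply_diff_time() (illustrative numbers only): with a
 * 32 kHz mono 16-bit stream (2 bytes per frame) and diff_time = -5 ms, the
 * basic correction is 0.005 * 32000 * 2 = 320 bytes, plus the 10 safety
 * frames (20 bytes), so sink_skip becomes 340 and source_skip is reset to 0.
 * Positive diff_time works the same way, but sets source_skip instead and
 * adds no extra safety frames. */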
691
692 /* Called from source I/O thread context. */
693 static void do_resync(struct userdata *u) {
694 int64_t diff_time;
695 struct snapshot latency_snapshot;
696
697 pa_log("Doing resync");
698
699 /* update our snapshot */
700 source_output_snapshot_within_thread(u, &latency_snapshot);
701 pa_asyncmsgq_send(u->sink_input->sink->asyncmsgq, PA_MSGOBJECT(u->sink_input), SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT, &latency_snapshot, 0, NULL);
702
703 /* calculate drift between capture and playback */
704 diff_time = calc_diff(u, &latency_snapshot);
705
706 /* and adjust for the drift */
707 apply_diff_time(u, diff_time);
708 }
709
710 /* 1. Calculate drift at this point, pass to canceller
711 * 2. Push out playback samples in blocksize chunks
712 * 3. Push out capture samples in blocksize chunks
713 * 4. ???
714 * 5. Profit
715 *
716 * Called from source I/O thread context.
717 */
718 static void do_push_drift_comp(struct userdata *u) {
719 size_t rlen, plen;
720 pa_memchunk rchunk, pchunk, cchunk;
721 uint8_t *rdata, *pdata, *cdata;
722 float drift;
723 int unused PA_GCC_UNUSED;
724
725 rlen = pa_memblockq_get_length(u->source_memblockq);
726 plen = pa_memblockq_get_length(u->sink_memblockq);
727
728 /* Estimate snapshot drift as follows:
729 * pd: amount of played data consumed since the last iteration
730 * rd: amount of recorded data consumed since the last iteration
731 *
732 * drift = (pd - rd) / rd;
733 *
734 * We calculate pd and rd as the memblockq length less the number of
735 * samples left over from the last iteration (to avoid double counting
736 * those remainder samples).
737 */
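/* For example (illustrative numbers only): if 960 new playback bytes and 950
 * new capture bytes arrived since the last iteration, drift is
 * (960 - 950) / 950 ~= +0.0105, i.e. playback is running about 1 % fast
 * relative to capture, and that factor is handed to the canceller below. */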
738 drift = ((float)(plen - u->sink_rem) - (rlen - u->source_rem)) / ((float)(rlen - u->source_rem));
739 u->sink_rem = plen % u->sink_blocksize;
740 u->source_rem = rlen % u->source_output_blocksize;
741
742 /* Now let the canceller work its drift compensation magic */
743 u->ec->set_drift(u->ec, drift);
744
745 if (u->save_aec) {
746 if (u->drift_file)
747 fprintf(u->drift_file, "d %a\n", drift);
748 }
749
750 /* Send in the playback samples first */
751 while (plen >= u->sink_blocksize) {
752 pa_memblockq_peek_fixed_size(u->sink_memblockq, u->sink_blocksize, &pchunk);
753 pdata = pa_memblock_acquire(pchunk.memblock);
754 pdata += pchunk.index;
755
756 u->ec->play(u->ec, pdata);
757
758 if (u->save_aec) {
759 if (u->drift_file)
760 fprintf(u->drift_file, "p %d\n", u->sink_blocksize);
761 if (u->played_file)
762 unused = fwrite(pdata, 1, u->sink_blocksize, u->played_file);
763 }
764
765 pa_memblock_release(pchunk.memblock);
766 pa_memblockq_drop(u->sink_memblockq, u->sink_blocksize);
767 pa_memblock_unref(pchunk.memblock);
768
769 plen -= u->sink_blocksize;
770 }
771
772 /* And now the capture samples */
773 while (rlen >= u->source_output_blocksize) {
774 pa_memblockq_peek_fixed_size(u->source_memblockq, u->source_output_blocksize, &rchunk);
775
776 rdata = pa_memblock_acquire(rchunk.memblock);
777 rdata += rchunk.index;
778
779 cchunk.index = 0;
780 cchunk.length = u->source_output_blocksize;
781 cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length);
782 cdata = pa_memblock_acquire(cchunk.memblock);
783
784 u->ec->record(u->ec, rdata, cdata);
785
786 if (u->save_aec) {
787 if (u->drift_file)
788 fprintf(u->drift_file, "c %d\n", u->source_output_blocksize);
789 if (u->captured_file)
790 unused = fwrite(rdata, 1, u->source_output_blocksize, u->captured_file);
791 if (u->canceled_file)
792 unused = fwrite(cdata, 1, u->source_output_blocksize, u->canceled_file);
793 }
794
795 pa_memblock_release(cchunk.memblock);
796 pa_memblock_release(rchunk.memblock);
797
798 pa_memblock_unref(rchunk.memblock);
799
800 pa_source_post(u->source, &cchunk);
801 pa_memblock_unref(cchunk.memblock);
802
803 pa_memblockq_drop(u->source_memblockq, u->source_output_blocksize);
804 rlen -= u->source_output_blocksize;
805 }
806 }
807
808 /* This one's simpler than the drift compensation case -- we just iterate over
809 * the capture buffer, and pass the canceller blocksize bytes of playback and
810 * capture data.
811 *
812 * Called from source I/O thread context. */
813 static void do_push(struct userdata *u) {
814 size_t rlen, plen;
815 pa_memchunk rchunk, pchunk, cchunk;
816 uint8_t *rdata, *pdata, *cdata;
817 int unused PA_GCC_UNUSED;
818
819 rlen = pa_memblockq_get_length(u->source_memblockq);
820 plen = pa_memblockq_get_length(u->sink_memblockq);
821
822 while (rlen >= u->source_output_blocksize) {
823
824 /* take fixed blocks from recorded and played samples */
825 pa_memblockq_peek_fixed_size(u->source_memblockq, u->source_output_blocksize, &rchunk);
826 pa_memblockq_peek_fixed_size(u->sink_memblockq, u->sink_blocksize, &pchunk);
827
828 /* we ran out of played data and pchunk has been filled with silence bytes */
829 if (plen < u->sink_blocksize)
830 pa_memblockq_seek(u->sink_memblockq, u->sink_blocksize - plen, PA_SEEK_RELATIVE, true);
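/* Illustrative example: with sink_blocksize = 320 and plen = 100, the peek
 * above padded pchunk with 220 bytes of silence; the relative seek moves the
 * write index forward by those 220 bytes so the queue accounts for the
 * silence that is consumed along with the real data. */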
831
832 rdata = pa_memblock_acquire(rchunk.memblock);
833 rdata += rchunk.index;
834 pdata = pa_memblock_acquire(pchunk.memblock);
835 pdata += pchunk.index;
836
837 cchunk.index = 0;
838 cchunk.length = u->source_blocksize;
839 cchunk.memblock = pa_memblock_new(u->source->core->mempool, cchunk.length);
840 cdata = pa_memblock_acquire(cchunk.memblock);
841
842 if (u->save_aec) {
843 if (u->captured_file)
844 unused = fwrite(rdata, 1, u->source_output_blocksize, u->captured_file);
845 if (u->played_file)
846 unused = fwrite(pdata, 1, u->sink_blocksize, u->played_file);
847 }
848
849 /* perform echo cancellation */
850 u->ec->run(u->ec, rdata, pdata, cdata);
851
852 if (u->save_aec) {
853 if (u->canceled_file)
854 unused = fwrite(cdata, 1, u->source_blocksize, u->canceled_file);
855 }
856
857 pa_memblock_release(cchunk.memblock);
858 pa_memblock_release(pchunk.memblock);
859 pa_memblock_release(rchunk.memblock);
860
861 /* drop consumed source samples */
862 pa_memblockq_drop(u->source_memblockq, u->source_output_blocksize);
863 pa_memblock_unref(rchunk.memblock);
864 rlen -= u->source_output_blocksize;
865
866 /* drop consumed sink samples */
867 pa_memblockq_drop(u->sink_memblockq, u->sink_blocksize);
868 pa_memblock_unref(pchunk.memblock);
869
870 if (plen >= u->sink_blocksize)
871 plen -= u->sink_blocksize;
872 else
873 plen = 0;
874
875 /* forward the (echo-canceled) data to the virtual source */
876 pa_source_post(u->source, &cchunk);
877 pa_memblock_unref(cchunk.memblock);
878 }
879 }
880
881 /* Called from source I/O thread context. */
882 static void source_output_push_cb(pa_source_output *o, const pa_memchunk *chunk) {
883 struct userdata *u;
884 size_t rlen, plen, to_skip;
885 pa_memchunk rchunk;
886
887 pa_source_output_assert_ref(o);
888 pa_source_output_assert_io_context(o);
889 pa_assert_se(u = o->userdata);
890
891 if (!PA_SOURCE_OUTPUT_IS_LINKED(pa_source_output_get_state(u->source_output))) {
892 pa_log("Push when no link?");
893 return;
894 }
895
896 if (PA_UNLIKELY(u->source->thread_info.state != PA_SOURCE_RUNNING ||
897 u->sink->thread_info.state != PA_SINK_RUNNING)) {
898 pa_source_post(u->source, chunk);
899 return;
900 }
901
902 /* handle queued messages, do any message sending of our own */
903 while (pa_asyncmsgq_process_one(u->asyncmsgq) > 0)
904 ;
905
906 pa_memblockq_push_align(u->source_memblockq, chunk);
907
908 rlen = pa_memblockq_get_length(u->source_memblockq);
909 plen = pa_memblockq_get_length(u->sink_memblockq);
910
911 /* Let's not do anything else till we have enough data to process */
912 if (rlen < u->source_output_blocksize)
913 return;
914
915 /* See if we need to drop samples in order to sync */
916 if (pa_atomic_cmpxchg (&u->request_resync, 1, 0)) {
917 do_resync(u);
918 }
919
920 /* Okay, skip cancellation for skipped source samples if needed. */
921 if (PA_UNLIKELY(u->source_skip)) {
922 /* The slightly tricky bit here is that we drop everything but the remainder
923 * modulo the blocksize and then adjust for that last bit on the sink side.
924 * We do this because the source data is coming in at a fixed rate, which
925 * means the only way to try to catch up is to drop sink samples and let
926 * the canceller cope with this. */
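/* For example (illustrative numbers only): with source_output_blocksize = 320
 * and source_skip = 1000, the code below drops 960 bytes (three whole blocks)
 * from the capture queue. The 40-byte remainder cannot be dropped here, so
 * the complementary 280 bytes, scaled by sink_blocksize /
 * source_output_blocksize, are added to sink_skip instead, which puts the
 * streams back on a block boundary. */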
927 to_skip = rlen >= u->source_skip ? u->source_skip : rlen;
928 to_skip -= to_skip % u->source_output_blocksize;
929
930 if (to_skip) {
931 pa_memblockq_peek_fixed_size(u->source_memblockq, to_skip, &rchunk);
932 pa_source_post(u->source, &rchunk);
933
934 pa_memblock_unref(rchunk.memblock);
935 pa_memblockq_drop(u->source_memblockq, to_skip);
936
937 rlen -= to_skip;
938 u->source_skip -= to_skip;
939 }
940
941 if (rlen && u->source_skip % u->source_output_blocksize) {
942 u->sink_skip += (uint64_t) (u->source_output_blocksize - (u->source_skip % u->source_output_blocksize)) * u->sink_blocksize / u->source_output_blocksize;
943 u->source_skip -= (u->source_skip % u->source_output_blocksize);
944 }
945 }
946
947 /* And for the sink, these samples have been played back already, so we can
948 * just drop them and get on with it. */
949 if (PA_UNLIKELY(u->sink_skip)) {
950 to_skip = plen >= u->sink_skip ? u->sink_skip : plen;
951
952 pa_memblockq_drop(u->sink_memblockq, to_skip);
953
954 plen -= to_skip;
955 u->sink_skip -= to_skip;
956 }
957
958 /* process and push out samples */
959 if (u->ec->params.drift_compensation)
960 do_push_drift_comp(u);
961 else
962 do_push(u);
963 }
964
965 /* Called from sink I/O thread context. */
966 static int sink_input_pop_cb(pa_sink_input *i, size_t nbytes, pa_memchunk *chunk) {
967 struct userdata *u;
968
969 pa_sink_input_assert_ref(i);
970 pa_assert(chunk);
971 pa_assert_se(u = i->userdata);
972
973 if (u->sink->thread_info.rewind_requested)
974 pa_sink_process_rewind(u->sink, 0);
975
976 pa_sink_render_full(u->sink, nbytes, chunk);
977
978 if (i->thread_info.underrun_for > 0) {
979 pa_log_debug("Handling end of underrun.");
980 pa_atomic_store(&u->request_resync, 1);
981 }
982
983 /* let source thread handle the chunk. pass the sample count as well so that
984 * the source IO thread can update the right variables. */
985 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_POST,
986 NULL, 0, chunk, NULL);
987 u->send_counter += chunk->length;
988
989 return 0;
990 }
991
992 /* Called from source I/O thread context. */
993 static void source_output_process_rewind_cb(pa_source_output *o, size_t nbytes) {
994 struct userdata *u;
995
996 pa_source_output_assert_ref(o);
997 pa_source_output_assert_io_context(o);
998 pa_assert_se(u = o->userdata);
999
1000 pa_source_process_rewind(u->source, nbytes);
1001
1002 /* go back on read side, we need to use older sink data for this */
1003 pa_memblockq_rewind(u->sink_memblockq, nbytes);
1004
1005 /* manipulate write index */
1006 pa_memblockq_seek(u->source_memblockq, -nbytes, PA_SEEK_RELATIVE, true);
1007
1008 pa_log_debug("Source rewind (%lld) %lld", (long long) nbytes,
1009 (long long) pa_memblockq_get_length (u->source_memblockq));
1010 }
1011
1012 /* Called from sink I/O thread context. */
1013 static void sink_input_process_rewind_cb(pa_sink_input *i, size_t nbytes) {
1014 struct userdata *u;
1015
1016 pa_sink_input_assert_ref(i);
1017 pa_assert_se(u = i->userdata);
1018
1019 pa_log_debug("Sink process rewind %lld", (long long) nbytes);
1020
1021 pa_sink_process_rewind(u->sink, nbytes);
1022
1023 pa_asyncmsgq_post(u->asyncmsgq, PA_MSGOBJECT(u->source_output), SOURCE_OUTPUT_MESSAGE_REWIND, NULL, (int64_t) nbytes, NULL, NULL);
1024 u->send_counter -= nbytes;
1025 }
1026
1027 /* Called from source I/O thread context. */
1028 static void source_output_snapshot_within_thread(struct userdata *u, struct snapshot *snapshot) {
1029 size_t delay, rlen, plen;
1030 pa_usec_t now, latency;
1031
1032 now = pa_rtclock_now();
1033 latency = pa_source_get_latency_within_thread(u->source_output->source);
1034 delay = pa_memblockq_get_length(u->source_output->thread_info.delay_memblockq);
1035
1036 delay = (u->source_output->thread_info.resampler ? pa_resampler_request(u->source_output->thread_info.resampler, delay) : delay);
1037 rlen = pa_memblockq_get_length(u->source_memblockq);
1038 plen = pa_memblockq_get_length(u->sink_memblockq);
1039
1040 snapshot->source_now = now;
1041 snapshot->source_latency = latency;
1042 snapshot->source_delay = delay;
1043 snapshot->recv_counter = u->recv_counter;
1044 snapshot->rlen = rlen + u->sink_skip;
1045 snapshot->plen = plen + u->source_skip;
1046 }
1047
1048 /* Called from source I/O thread context. */
1049 static int source_output_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1050 struct userdata *u = PA_SOURCE_OUTPUT(obj)->userdata;
1051
1052 switch (code) {
1053
1054 case SOURCE_OUTPUT_MESSAGE_POST:
1055
1056 pa_source_output_assert_io_context(u->source_output);
1057
1058 if (u->source_output->source->thread_info.state == PA_SOURCE_RUNNING)
1059 pa_memblockq_push_align(u->sink_memblockq, chunk);
1060 else
1061 pa_memblockq_flush_write(u->sink_memblockq, true);
1062
1063 u->recv_counter += (int64_t) chunk->length;
1064
1065 return 0;
1066
1067 case SOURCE_OUTPUT_MESSAGE_REWIND:
1068 pa_source_output_assert_io_context(u->source_output);
1069
1070 /* manipulate write index, never go past what we have */
1071 if (PA_SOURCE_IS_OPENED(u->source_output->source->thread_info.state))
1072 pa_memblockq_seek(u->sink_memblockq, -offset, PA_SEEK_RELATIVE, true);
1073 else
1074 pa_memblockq_flush_write(u->sink_memblockq, true);
1075
1076 pa_log_debug("Sink rewind (%lld)", (long long) offset);
1077
1078 u->recv_counter -= offset;
1079
1080 return 0;
1081
1082 case SOURCE_OUTPUT_MESSAGE_LATENCY_SNAPSHOT: {
1083 struct snapshot *snapshot = (struct snapshot *) data;
1084
1085 source_output_snapshot_within_thread(u, snapshot);
1086 return 0;
1087 }
1088
1089 case SOURCE_OUTPUT_MESSAGE_APPLY_DIFF_TIME:
1090 apply_diff_time(u, offset);
1091 return 0;
1092
1093 }
1094
1095 return pa_source_output_process_msg(obj, code, data, offset, chunk);
1096 }
1097
1098 /* Called from sink I/O thread context. */
1099 static int sink_input_process_msg_cb(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
1100 struct userdata *u = PA_SINK_INPUT(obj)->userdata;
1101
1102 switch (code) {
1103
1104 case SINK_INPUT_MESSAGE_LATENCY_SNAPSHOT: {
1105 size_t delay;
1106 pa_usec_t now, latency;
1107 struct snapshot *snapshot = (struct snapshot *) data;
1108
1109 pa_sink_input_assert_io_context(u->sink_input);
1110
1111 now = pa_rtclock_now();
1112 latency = pa_sink_get_latency_within_thread(u->sink_input->sink);
1113 delay = pa_memblockq_get_length(u->sink_input->thread_info.render_memblockq);
1114
1115 delay = (u->sink_input->thread_info.resampler ? pa_resampler_request(u->sink_input->thread_info.resampler, delay) : delay);
1116
1117 snapshot->sink_now = now;
1118 snapshot->sink_latency = latency;
1119 snapshot->sink_delay = delay;
1120 snapshot->send_counter = u->send_counter;
1121 return 0;
1122 }
1123 }
1124
1125 return pa_sink_input_process_msg(obj, code, data, offset, chunk);
1126 }
1127
1128 /* Called from sink I/O thread context. */
1129 static void sink_input_update_max_rewind_cb(pa_sink_input *i, size_t nbytes) {
1130 struct userdata *u;
1131
1132 pa_sink_input_assert_ref(i);
1133 pa_assert_se(u = i->userdata);
1134
1135 pa_log_debug("Sink input update max rewind %lld", (long long) nbytes);
1136
1137 /* FIXME: Too small max_rewind:
1138 * https://bugs.freedesktop.org/show_bug.cgi?id=53709 */
1139 pa_memblockq_set_maxrewind(u->sink_memblockq, nbytes);
1140 pa_sink_set_max_rewind_within_thread(u->sink, nbytes);
1141 }
1142
1143 /* Called from source I/O thread context. */
1144 static void source_output_update_max_rewind_cb(pa_source_output *o, size_t nbytes) {
1145 struct userdata *u;
1146
1147 pa_source_output_assert_ref(o);
1148 pa_assert_se(u = o->userdata);
1149
1150 pa_log_debug("Source output update max rewind %lld", (long long) nbytes);
1151
1152 pa_source_set_max_rewind_within_thread(u->source, nbytes);
1153 }
1154
1155 /* Called from sink I/O thread context. */
1156 static void sink_input_update_max_request_cb(pa_sink_input *i, size_t nbytes) {
1157 struct userdata *u;
1158
1159 pa_sink_input_assert_ref(i);
1160 pa_assert_se(u = i->userdata);
1161
1162 pa_log_debug("Sink input update max request %lld", (long long) nbytes);
1163
1164 pa_sink_set_max_request_within_thread(u->sink, nbytes);
1165 }
1166
1167 /* Called from sink I/O thread context. */
1168 static void sink_input_update_sink_requested_latency_cb(pa_sink_input *i) {
1169 struct userdata *u;
1170 pa_usec_t latency;
1171
1172 pa_sink_input_assert_ref(i);
1173 pa_assert_se(u = i->userdata);
1174
1175 latency = pa_sink_get_requested_latency_within_thread(i->sink);
1176
1177 pa_log_debug("Sink input update requested latency %lld", (long long) latency);
1178 }
1179
1180 /* Called from source I/O thread context. */
1181 static void source_output_update_source_requested_latency_cb(pa_source_output *o) {
1182 struct userdata *u;
1183 pa_usec_t latency;
1184
1185 pa_source_output_assert_ref(o);
1186 pa_assert_se(u = o->userdata);
1187
1188 latency = pa_source_get_requested_latency_within_thread(o->source);
1189
1190 pa_log_debug("Source output update requested latency %lld", (long long) latency);
1191 }
1192
1193 /* Called from sink I/O thread context. */
1194 static void sink_input_update_sink_latency_range_cb(pa_sink_input *i) {
1195 struct userdata *u;
1196
1197 pa_sink_input_assert_ref(i);
1198 pa_assert_se(u = i->userdata);
1199
1200 pa_log_debug("Sink input update latency range %lld %lld",
1201 (long long) i->sink->thread_info.min_latency,
1202 (long long) i->sink->thread_info.max_latency);
1203
1204 pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);
1205 }
1206
1207 /* Called from source I/O thread context. */
1208 static void source_output_update_source_latency_range_cb(pa_source_output *o) {
1209 struct userdata *u;
1210
1211 pa_source_output_assert_ref(o);
1212 pa_assert_se(u = o->userdata);
1213
1214 pa_log_debug("Source output update latency range %lld %lld",
1215 (long long) o->source->thread_info.min_latency,
1216 (long long) o->source->thread_info.max_latency);
1217
1218 pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
1219 }
1220
1221 /* Called from sink I/O thread context. */
1222 static void sink_input_update_sink_fixed_latency_cb(pa_sink_input *i) {
1223 struct userdata *u;
1224
1225 pa_sink_input_assert_ref(i);
1226 pa_assert_se(u = i->userdata);
1227
1228 pa_log_debug("Sink input update fixed latency %lld",
1229 (long long) i->sink->thread_info.fixed_latency);
1230
1231 pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);
1232 }
1233
1234 /* Called from source I/O thread context. */
1235 static void source_output_update_source_fixed_latency_cb(pa_source_output *o) {
1236 struct userdata *u;
1237
1238 pa_source_output_assert_ref(o);
1239 pa_assert_se(u = o->userdata);
1240
1241 pa_log_debug("Source output update fixed latency %lld",
1242 (long long) o->source->thread_info.fixed_latency);
1243
1244 pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
1245 }
1246
1247 /* Called from source I/O thread context. */
1248 static void source_output_attach_cb(pa_source_output *o) {
1249 struct userdata *u;
1250
1251 pa_source_output_assert_ref(o);
1252 pa_source_output_assert_io_context(o);
1253 pa_assert_se(u = o->userdata);
1254
1255 pa_source_set_rtpoll(u->source, o->source->thread_info.rtpoll);
1256 pa_source_set_latency_range_within_thread(u->source, o->source->thread_info.min_latency, o->source->thread_info.max_latency);
1257 pa_source_set_fixed_latency_within_thread(u->source, o->source->thread_info.fixed_latency);
1258 pa_source_set_max_rewind_within_thread(u->source, pa_source_output_get_max_rewind(o));
1259
1260 pa_log_debug("Source output %d attach", o->index);
1261
1262 pa_source_attach_within_thread(u->source);
1263
1264 u->rtpoll_item_read = pa_rtpoll_item_new_asyncmsgq_read(
1265 o->source->thread_info.rtpoll,
1266 PA_RTPOLL_LATE,
1267 u->asyncmsgq);
1268 }
1269
1270 /* Called from sink I/O thread context. */
1271 static void sink_input_attach_cb(pa_sink_input *i) {
1272 struct userdata *u;
1273
1274 pa_sink_input_assert_ref(i);
1275 pa_assert_se(u = i->userdata);
1276
1277 pa_sink_set_rtpoll(u->sink, i->sink->thread_info.rtpoll);
1278 pa_sink_set_latency_range_within_thread(u->sink, i->sink->thread_info.min_latency, i->sink->thread_info.max_latency);
1279
1280 /* (8.1) IF YOU NEED A FIXED BLOCK SIZE ADD THE LATENCY FOR ONE
1281 * BLOCK MINUS ONE SAMPLE HERE. SEE (7) */
1282 pa_sink_set_fixed_latency_within_thread(u->sink, i->sink->thread_info.fixed_latency);
1283
1284 /* (8.2) IF YOU NEED A FIXED BLOCK SIZE ROUND
1285 * pa_sink_input_get_max_request(i) UP TO MULTIPLES OF IT
1286 * HERE. SEE (6) */
1287 pa_sink_set_max_request_within_thread(u->sink, pa_sink_input_get_max_request(i));
1288
1289 /* FIXME: Too small max_rewind:
1290 * https://bugs.freedesktop.org/show_bug.cgi?id=53709 */
1291 pa_sink_set_max_rewind_within_thread(u->sink, pa_sink_input_get_max_rewind(i));
1292
1293 pa_log_debug("Sink input %d attach", i->index);
1294
1295 u->rtpoll_item_write = pa_rtpoll_item_new_asyncmsgq_write(
1296 i->sink->thread_info.rtpoll,
1297 PA_RTPOLL_LATE,
1298 u->asyncmsgq);
1299
1300 pa_sink_attach_within_thread(u->sink);
1301 }
1302
1303 /* Called from source I/O thread context. */
1304 static void source_output_detach_cb(pa_source_output *o) {
1305 struct userdata *u;
1306
1307 pa_source_output_assert_ref(o);
1308 pa_source_output_assert_io_context(o);
1309 pa_assert_se(u = o->userdata);
1310
1311 pa_source_detach_within_thread(u->source);
1312 pa_source_set_rtpoll(u->source, NULL);
1313
1314 pa_log_debug("Source output %d detach", o->index);
1315
1316 if (u->rtpoll_item_read) {
1317 pa_rtpoll_item_free(u->rtpoll_item_read);
1318 u->rtpoll_item_read = NULL;
1319 }
1320 }
1321
1322 /* Called from sink I/O thread context. */
1323 static void sink_input_detach_cb(pa_sink_input *i) {
1324 struct userdata *u;
1325
1326 pa_sink_input_assert_ref(i);
1327 pa_assert_se(u = i->userdata);
1328
1329 pa_sink_detach_within_thread(u->sink);
1330
1331 pa_sink_set_rtpoll(u->sink, NULL);
1332
1333 pa_log_debug("Sink input %d detach", i->index);
1334
1335 if (u->rtpoll_item_write) {
1336 pa_rtpoll_item_free(u->rtpoll_item_write);
1337 u->rtpoll_item_write = NULL;
1338 }
1339 }
1340
1341 /* Called from source I/O thread context. */
1342 static void source_output_state_change_cb(pa_source_output *o, pa_source_output_state_t state) {
1343 struct userdata *u;
1344
1345 pa_source_output_assert_ref(o);
1346 pa_source_output_assert_io_context(o);
1347 pa_assert_se(u = o->userdata);
1348
1349 pa_log_debug("Source output %d state %d", o->index, state);
1350 }
1351
1352 /* Called from sink I/O thread context. */
1353 static void sink_input_state_change_cb(pa_sink_input *i, pa_sink_input_state_t state) {
1354 struct userdata *u;
1355
1356 pa_sink_input_assert_ref(i);
1357 pa_assert_se(u = i->userdata);
1358
1359 pa_log_debug("Sink input %d state %d", i->index, state);
1360
1361 /* If we are added for the first time, ask for a rewinding so that
1362 * we are heard right away. */
1363 if (PA_SINK_INPUT_IS_LINKED(state) &&
1364 i->thread_info.state == PA_SINK_INPUT_INIT) {
1365 pa_log_debug("Requesting rewind due to state change.");
1366 pa_sink_input_request_rewind(i, 0, false, true, true);
1367 }
1368 }
1369
1370 /* Called from main context. */
1371 static void source_output_kill_cb(pa_source_output *o) {
1372 struct userdata *u;
1373
1374 pa_source_output_assert_ref(o);
1375 pa_assert_ctl_context();
1376 pa_assert_se(u = o->userdata);
1377
1378 u->dead = true;
1379
1380 /* The order here matters! We first kill the source output, followed
1381 * by the source. That means the source callbacks must be protected
1382 * against an unconnected source output! */
1383 pa_source_output_unlink(u->source_output);
1384 pa_source_unlink(u->source);
1385
1386 pa_source_output_unref(u->source_output);
1387 u->source_output = NULL;
1388
1389 pa_source_unref(u->source);
1390 u->source = NULL;
1391
1392 pa_log_debug("Source output kill %d", o->index);
1393
1394 pa_module_unload_request(u->module, true);
1395 }
1396
1397 /* Called from main context */
1398 static void sink_input_kill_cb(pa_sink_input *i) {
1399 struct userdata *u;
1400
1401 pa_sink_input_assert_ref(i);
1402 pa_assert_se(u = i->userdata);
1403
1404 u->dead = true;
1405
1406 /* The order here matters! We first kill the sink input, followed
1407 * by the sink. That means the sink callbacks must be protected
1408 * against an unconnected sink input! */
1409 pa_sink_input_unlink(u->sink_input);
1410 pa_sink_unlink(u->sink);
1411
1412 pa_sink_input_unref(u->sink_input);
1413 u->sink_input = NULL;
1414
1415 pa_sink_unref(u->sink);
1416 u->sink = NULL;
1417
1418 pa_log_debug("Sink input kill %d", i->index);
1419
1420 pa_module_unload_request(u->module, true);
1421 }
1422
1423 /* Called from main context. */
1424 static bool source_output_may_move_to_cb(pa_source_output *o, pa_source *dest) {
1425 struct userdata *u;
1426
1427 pa_source_output_assert_ref(o);
1428 pa_assert_ctl_context();
1429 pa_assert_se(u = o->userdata);
1430
1431 if (u->dead || u->autoloaded)
1432 return false;
1433
1434 return (u->source != dest) && (u->sink != dest->monitor_of);
1435 }
1436
1437 /* Called from main context */
1438 static bool sink_input_may_move_to_cb(pa_sink_input *i, pa_sink *dest) {
1439 struct userdata *u;
1440
1441 pa_sink_input_assert_ref(i);
1442 pa_assert_se(u = i->userdata);
1443
1444 if (u->dead || u->autoloaded)
1445 return false;
1446
1447 return u->sink != dest;
1448 }
1449
1450 /* Called from main context. */
1451 static void source_output_moving_cb(pa_source_output *o, pa_source *dest) {
1452 struct userdata *u;
1453
1454 pa_source_output_assert_ref(o);
1455 pa_assert_ctl_context();
1456 pa_assert_se(u = o->userdata);
1457
1458 if (dest) {
1459 pa_source_set_asyncmsgq(u->source, dest->asyncmsgq);
1460 pa_source_update_flags(u->source, PA_SOURCE_LATENCY|PA_SOURCE_DYNAMIC_LATENCY, dest->flags);
1461 } else
1462 pa_source_set_asyncmsgq(u->source, NULL);
1463
1464 if (u->source_auto_desc && dest) {
1465 const char *y, *z;
1466 pa_proplist *pl;
1467
1468 pl = pa_proplist_new();
1469 y = pa_proplist_gets(u->sink_input->sink->proplist, PA_PROP_DEVICE_DESCRIPTION);
1470 z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
1471 pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)", z ? z : dest->name,
1472 y ? y : u->sink_input->sink->name);
1473
1474 pa_source_update_proplist(u->source, PA_UPDATE_REPLACE, pl);
1475 pa_proplist_free(pl);
1476 }
1477 }
1478
1479 /* Called from main context */
1480 static void sink_input_moving_cb(pa_sink_input *i, pa_sink *dest) {
1481 struct userdata *u;
1482
1483 pa_sink_input_assert_ref(i);
1484 pa_assert_se(u = i->userdata);
1485
1486 if (dest) {
1487 pa_sink_set_asyncmsgq(u->sink, dest->asyncmsgq);
1488 pa_sink_update_flags(u->sink, PA_SINK_LATENCY|PA_SINK_DYNAMIC_LATENCY, dest->flags);
1489 } else
1490 pa_sink_set_asyncmsgq(u->sink, NULL);
1491
1492 if (u->sink_auto_desc && dest) {
1493 const char *y, *z;
1494 pa_proplist *pl;
1495
1496 pl = pa_proplist_new();
1497 y = pa_proplist_gets(u->source_output->source->proplist, PA_PROP_DEVICE_DESCRIPTION);
1498 z = pa_proplist_gets(dest->proplist, PA_PROP_DEVICE_DESCRIPTION);
1499 pa_proplist_setf(pl, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)", z ? z : dest->name,
1500 y ? y : u->source_output->source->name);
1501
1502 pa_sink_update_proplist(u->sink, PA_UPDATE_REPLACE, pl);
1503 pa_proplist_free(pl);
1504 }
1505 }
1506
1507 /* Called from main context */
1508 static void sink_input_volume_changed_cb(pa_sink_input *i) {
1509 struct userdata *u;
1510
1511 pa_sink_input_assert_ref(i);
1512 pa_assert_se(u = i->userdata);
1513
1514 pa_sink_volume_changed(u->sink, &i->volume);
1515 }
1516
1517 /* Called from main context */
1518 static void sink_input_mute_changed_cb(pa_sink_input *i) {
1519 struct userdata *u;
1520
1521 pa_sink_input_assert_ref(i);
1522 pa_assert_se(u = i->userdata);
1523
1524 pa_sink_mute_changed(u->sink, i->muted);
1525 }
1526
1527 /* Called from main context */
1528 static int canceller_process_msg_cb(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
1529 struct pa_echo_canceller_msg *msg;
1530 struct userdata *u;
1531
1532 pa_assert(o);
1533
1534 msg = PA_ECHO_CANCELLER_MSG(o);
1535 u = msg->userdata;
1536
1537 switch (code) {
1538 case ECHO_CANCELLER_MESSAGE_SET_VOLUME: {
1539 pa_cvolume *v = (pa_cvolume *) userdata;
1540
1541 if (u->use_volume_sharing)
1542 pa_source_set_volume(u->source, v, true, false);
1543 else
1544 pa_source_output_set_volume(u->source_output, v, false, true);
1545
1546 break;
1547 }
1548
1549 default:
1550 pa_assert_not_reached();
1551 break;
1552 }
1553
1554 return 0;
1555 }
1556
1557 /* Called by the canceller, so source I/O thread context. */
1558 void pa_echo_canceller_get_capture_volume(pa_echo_canceller *ec, pa_cvolume *v) {
1559 *v = ec->msg->userdata->thread_info.current_volume;
1560 }
1561
1562 /* Called by the canceller, so source I/O thread context. */
1563 void pa_echo_canceller_set_capture_volume(pa_echo_canceller *ec, pa_cvolume *v) {
1564 if (!pa_cvolume_equal(&ec->msg->userdata->thread_info.current_volume, v)) {
1565 pa_cvolume *vol = pa_xnewdup(pa_cvolume, v, 1);
1566
1567 pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(ec->msg), ECHO_CANCELLER_MESSAGE_SET_VOLUME, vol, 0, NULL,
1568 pa_xfree);
1569 }
1570 }
1571
1572 uint32_t pa_echo_canceller_blocksize_power2(unsigned rate, unsigned ms) {
1573 unsigned nframes = (rate * ms) / 1000;
1574 uint32_t y = 1 << ((8 * sizeof(uint32_t)) - 2);
1575
1576 assert(rate >= 4000);
1577 assert(ms >= 1);
1578
1579 /* nframes should be a power of 2, round down to nearest power of two */
1580 while (y > nframes)
1581 y >>= 1;
1582
1583 assert(y >= 1);
1584 return y;
1585 }
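/* For example, pa_echo_canceller_blocksize_power2(48000, 10) computes
 * nframes = 480 and returns 256, the largest power of two not exceeding 480.
 * (Illustrative values; the actual rate and block length depend on the AEC
 * implementation that calls this.) */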
1586
1587 static pa_echo_canceller_method_t get_ec_method_from_string(const char *method) {
1588 if (pa_streq(method, "null"))
1589 return PA_ECHO_CANCELLER_NULL;
1590 #ifdef HAVE_SPEEX
1591 if (pa_streq(method, "speex"))
1592 return PA_ECHO_CANCELLER_SPEEX;
1593 #endif
1594 #ifdef HAVE_ADRIAN_EC
1595 if (pa_streq(method, "adrian"))
1596 return PA_ECHO_CANCELLER_ADRIAN;
1597 #endif
1598 #ifdef HAVE_WEBRTC
1599 if (pa_streq(method, "webrtc"))
1600 return PA_ECHO_CANCELLER_WEBRTC;
1601 #endif
1602 return PA_ECHO_CANCELLER_INVALID;
1603 }
1604
1605 /* Common initialisation bits between module-echo-cancel and the standalone
1606 * test program.
1607 *
1608 * Called from main context. */
1609 static int init_common(pa_modargs *ma, struct userdata *u, pa_sample_spec *source_ss, pa_channel_map *source_map) {
1610 const char *ec_string;
1611 pa_echo_canceller_method_t ec_method;
1612
1613 if (pa_modargs_get_sample_spec_and_channel_map(ma, source_ss, source_map, PA_CHANNEL_MAP_DEFAULT) < 0) {
1614 pa_log("Invalid sample format specification or channel map");
1615 goto fail;
1616 }
1617
1618 u->ec = pa_xnew0(pa_echo_canceller, 1);
1619 if (!u->ec) {
1620 pa_log("Failed to alloc echo canceller");
1621 goto fail;
1622 }
1623
1624 ec_string = pa_modargs_get_value(ma, "aec_method", DEFAULT_ECHO_CANCELLER);
1625 if ((ec_method = get_ec_method_from_string(ec_string)) < 0) {
1626 pa_log("Invalid echo canceller implementation '%s'", ec_string);
1627 goto fail;
1628 }
1629
1630 pa_log_info("Using AEC engine: %s", ec_string);
1631
1632 u->ec->init = ec_table[ec_method].init;
1633 u->ec->play = ec_table[ec_method].play;
1634 u->ec->record = ec_table[ec_method].record;
1635 u->ec->set_drift = ec_table[ec_method].set_drift;
1636 u->ec->run = ec_table[ec_method].run;
1637 u->ec->done = ec_table[ec_method].done;
1638
1639 return 0;
1640
1641 fail:
1642 return -1;
1643 }
1644
1645 /* Called from main context. */
1646 int pa__init(pa_module*m) {
1647 struct userdata *u;
1648 pa_sample_spec source_output_ss, source_ss, sink_ss;
1649 pa_channel_map source_output_map, source_map, sink_map;
1650 pa_modargs *ma;
1651 pa_source *source_master=NULL;
1652 pa_sink *sink_master=NULL;
1653 pa_source_output_new_data source_output_data;
1654 pa_sink_input_new_data sink_input_data;
1655 pa_source_new_data source_data;
1656 pa_sink_new_data sink_data;
1657 pa_memchunk silence;
1658 uint32_t temp;
1659 uint32_t nframes = 0;
1660
1661 pa_assert(m);
1662
1663 if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
1664 pa_log("Failed to parse module arguments.");
1665 goto fail;
1666 }
1667
1668 if (!(source_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "source_master", NULL), PA_NAMEREG_SOURCE))) {
1669 pa_log("Master source not found");
1670 goto fail;
1671 }
1672 pa_assert(source_master);
1673
1674 if (!(sink_master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "sink_master", NULL), PA_NAMEREG_SINK))) {
1675 pa_log("Master sink not found");
1676 goto fail;
1677 }
1678 pa_assert(sink_master);
1679
1680 if (source_master->monitor_of == sink_master) {
1681 pa_log("Can't cancel echo between a sink and its monitor");
1682 goto fail;
1683 }
1684
1685 source_ss = source_master->sample_spec;
1686 source_ss.rate = DEFAULT_RATE;
1687 source_ss.channels = DEFAULT_CHANNELS;
1688 pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT);
1689
1690 sink_ss = sink_master->sample_spec;
1691 sink_map = sink_master->channel_map;
1692
1693 u = pa_xnew0(struct userdata, 1);
1694 if (!u) {
1695 pa_log("Failed to alloc userdata");
1696 goto fail;
1697 }
1698 u->core = m->core;
1699 u->module = m;
1700 m->userdata = u;
1701 u->dead = false;
1702
1703 u->use_volume_sharing = true;
1704 if (pa_modargs_get_value_boolean(ma, "use_volume_sharing", &u->use_volume_sharing) < 0) {
1705 pa_log("use_volume_sharing= expects a boolean argument");
1706 goto fail;
1707 }
1708
1709 temp = DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC;
1710 if (pa_modargs_get_value_u32(ma, "adjust_time", &temp) < 0) {
1711 pa_log("Failed to parse adjust_time value");
1712 goto fail;
1713 }
1714
1715 if (temp != DEFAULT_ADJUST_TIME_USEC / PA_USEC_PER_SEC)
1716 u->adjust_time = temp * PA_USEC_PER_SEC;
1717 else
1718 u->adjust_time = DEFAULT_ADJUST_TIME_USEC;
1719
1720 temp = DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC;
1721 if (pa_modargs_get_value_u32(ma, "adjust_threshold", &temp) < 0) {
1722 pa_log("Failed to parse adjust_threshold value");
1723 goto fail;
1724 }
1725
1726 if (temp != DEFAULT_ADJUST_TOLERANCE / PA_USEC_PER_MSEC)
1727 u->adjust_threshold = temp * PA_USEC_PER_MSEC;
1728 else
1729 u->adjust_threshold = DEFAULT_ADJUST_TOLERANCE;
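/* Worked example with illustrative values: passing adjust_time=2 yields
 * u->adjust_time = 2 * PA_USEC_PER_SEC = 2,000,000 us, and adjust_threshold=5
 * yields u->adjust_threshold = 5 * PA_USEC_PER_MSEC = 5,000 us; leaving either
 * argument unset keeps the corresponding default, which is already in usec. */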
1730
1731 u->save_aec = DEFAULT_SAVE_AEC;
1732 if (pa_modargs_get_value_boolean(ma, "save_aec", &u->save_aec) < 0) {
1733 pa_log("Failed to parse save_aec value");
1734 goto fail;
1735 }
1736
1737 u->autoloaded = DEFAULT_AUTOLOADED;
1738 if (pa_modargs_get_value_boolean(ma, "autoloaded", &u->autoloaded) < 0) {
1739 pa_log("Failed to parse autoloaded value");
1740 goto fail;
1741 }
1742
1743 if (init_common(ma, u, &source_ss, &source_map) < 0)
1744 goto fail;
1745
1746 u->asyncmsgq = pa_asyncmsgq_new(0);
1747 u->need_realign = true;
1748
1749 source_output_ss = source_ss;
1750 source_output_map = source_map;
1751
1752 if (sink_ss.rate != source_ss.rate) {
1753 pa_log_info("Sample rates of the playback and output streams differ. Adjusting the playback stream's rate.");
1754 sink_ss.rate = source_ss.rate;
1755 }
1756
1757 pa_assert(u->ec->init);
1758 if (!u->ec->init(u->core, u->ec, &source_output_ss, &source_output_map, &sink_ss, &sink_map, &source_ss, &source_map, &nframes, pa_modargs_get_value(ma, "aec_args", NULL))) {
1759 pa_log("Failed to init AEC engine");
1760 goto fail;
1761 }
1762
1763 pa_assert(source_output_ss.rate == source_ss.rate);
1764 pa_assert(sink_ss.rate == source_ss.rate);
1765
1766 u->source_output_blocksize = nframes * pa_frame_size(&source_output_ss);
1767 u->source_blocksize = nframes * pa_frame_size(&source_ss);
1768 u->sink_blocksize = nframes * pa_frame_size(&sink_ss);
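/* Example with illustrative numbers: if the engine reports nframes = 160
 * (10 ms at 16 kHz) and source_output_ss is a single 16-bit channel, then
 * pa_frame_size() is 2 bytes and source_output_blocksize = 160 * 2 = 320
 * bytes; the other two block sizes follow the same arithmetic with their
 * own sample specs. */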
1769
1770 if (u->ec->params.drift_compensation)
1771 pa_assert(u->ec->set_drift);
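/* An engine that claims drift compensation must also provide set_drift();
 * the module's own rate-adjustment timer is then skipped further below. */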
1772
1773 /* Create source */
1774 pa_source_new_data_init(&source_data);
1775 source_data.driver = __FILE__;
1776 source_data.module = m;
1777 if (!(source_data.name = pa_xstrdup(pa_modargs_get_value(ma, "source_name", NULL))))
1778 source_data.name = pa_sprintf_malloc("%s.echo-cancel", source_master->name);
1779 pa_source_new_data_set_sample_spec(&source_data, &source_ss);
1780 pa_source_new_data_set_channel_map(&source_data, &source_map);
1781 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, source_master->name);
1782 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1783 if (!u->autoloaded)
1784 pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
1785
1786 if (pa_modargs_get_proplist(ma, "source_properties", source_data.proplist, PA_UPDATE_REPLACE) < 0) {
1787 pa_log("Invalid properties");
1788 pa_source_new_data_done(&source_data);
1789 goto fail;
1790 }
1791
1792 if ((u->source_auto_desc = !pa_proplist_contains(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
1793 const char *y, *z;
1794
1795 y = pa_proplist_gets(sink_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1796 z = pa_proplist_gets(source_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1797 pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)",
1798 z ? z : source_master->name, y ? y : sink_master->name);
1799 }
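/* With hypothetical device descriptions this produces something like
 * "Internal Microphone (echo cancelled with Speakers)". */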
1800
1801 u->source = pa_source_new(m->core, &source_data, (source_master->flags & (PA_SOURCE_LATENCY | PA_SOURCE_DYNAMIC_LATENCY))
1802 | (u->use_volume_sharing ? PA_SOURCE_SHARE_VOLUME_WITH_MASTER : 0));
1803 pa_source_new_data_done(&source_data);
1804
1805 if (!u->source) {
1806 pa_log("Failed to create source.");
1807 goto fail;
1808 }
1809
1810 u->source->parent.process_msg = source_process_msg_cb;
1811 u->source->set_state = source_set_state_cb;
1812 u->source->update_requested_latency = source_update_requested_latency_cb;
1813 pa_source_set_get_mute_callback(u->source, source_get_mute_cb);
1814 pa_source_set_set_mute_callback(u->source, source_set_mute_cb);
1815 if (!u->use_volume_sharing) {
1816 pa_source_set_get_volume_callback(u->source, source_get_volume_cb);
1817 pa_source_set_set_volume_callback(u->source, source_set_volume_cb);
1818 pa_source_enable_decibel_volume(u->source, true);
1819 }
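/* With use_volume_sharing enabled the source was created with
 * PA_SOURCE_SHARE_VOLUME_WITH_MASTER above, so only the mute callbacks are
 * installed here and volume handling is left to the master. */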
1820 u->source->userdata = u;
1821
1822 pa_source_set_asyncmsgq(u->source, source_master->asyncmsgq);
1823
1824 /* Create sink */
1825 pa_sink_new_data_init(&sink_data);
1826 sink_data.driver = __FILE__;
1827 sink_data.module = m;
1828 if (!(sink_data.name = pa_xstrdup(pa_modargs_get_value(ma, "sink_name", NULL))))
1829 sink_data.name = pa_sprintf_malloc("%s.echo-cancel", sink_master->name);
1830 pa_sink_new_data_set_sample_spec(&sink_data, &sink_ss);
1831 pa_sink_new_data_set_channel_map(&sink_data, &sink_map);
1832 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_MASTER_DEVICE, sink_master->name);
1833 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_CLASS, "filter");
1834 if (!u->autoloaded)
1835 pa_proplist_sets(sink_data.proplist, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
1836
1837 if (pa_modargs_get_proplist(ma, "sink_properties", sink_data.proplist, PA_UPDATE_REPLACE) < 0) {
1838 pa_log("Invalid properties");
1839 pa_sink_new_data_done(&sink_data);
1840 goto fail;
1841 }
1842
1843 if ((u->sink_auto_desc = !pa_proplist_contains(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION))) {
1844 const char *y, *z;
1845
1846 y = pa_proplist_gets(source_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1847 z = pa_proplist_gets(sink_master->proplist, PA_PROP_DEVICE_DESCRIPTION);
1848 pa_proplist_setf(sink_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "%s (echo cancelled with %s)",
1849 z ? z : sink_master->name, y ? y : source_master->name);
1850 }
1851
1852 u->sink = pa_sink_new(m->core, &sink_data, (sink_master->flags & (PA_SINK_LATENCY | PA_SINK_DYNAMIC_LATENCY))
1853 | (u->use_volume_sharing ? PA_SINK_SHARE_VOLUME_WITH_MASTER : 0));
1854 pa_sink_new_data_done(&sink_data);
1855
1856 if (!u->sink) {
1857 pa_log("Failed to create sink.");
1858 goto fail;
1859 }
1860
1861 u->sink->parent.process_msg = sink_process_msg_cb;
1862 u->sink->set_state = sink_set_state_cb;
1863 u->sink->update_requested_latency = sink_update_requested_latency_cb;
1864 u->sink->request_rewind = sink_request_rewind_cb;
1865 pa_sink_set_set_mute_callback(u->sink, sink_set_mute_cb);
1866 if (!u->use_volume_sharing) {
1867 pa_sink_set_set_volume_callback(u->sink, sink_set_volume_cb);
1868 pa_sink_enable_decibel_volume(u->sink, true);
1869 }
1870 u->sink->userdata = u;
1871
1872 pa_sink_set_asyncmsgq(u->sink, sink_master->asyncmsgq);
1873
1874 /* Create source output */
1875 pa_source_output_new_data_init(&source_output_data);
1876 source_output_data.driver = __FILE__;
1877 source_output_data.module = m;
1878 pa_source_output_new_data_set_source(&source_output_data, source_master, false);
1879 source_output_data.destination_source = u->source;
1880
1881 pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Source Stream");
1882 pa_proplist_sets(source_output_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
1883 pa_source_output_new_data_set_sample_spec(&source_output_data, &source_output_ss);
1884 pa_source_output_new_data_set_channel_map(&source_output_data, &source_output_map);
1885
1886 pa_source_output_new(&u->source_output, m->core, &source_output_data);
1887 pa_source_output_new_data_done(&source_output_data);
1888
1889 if (!u->source_output)
1890 goto fail;
1891
1892 u->source_output->parent.process_msg = source_output_process_msg_cb;
1893 u->source_output->push = source_output_push_cb;
1894 u->source_output->process_rewind = source_output_process_rewind_cb;
1895 u->source_output->update_max_rewind = source_output_update_max_rewind_cb;
1896 u->source_output->update_source_requested_latency = source_output_update_source_requested_latency_cb;
1897 u->source_output->update_source_latency_range = source_output_update_source_latency_range_cb;
1898 u->source_output->update_source_fixed_latency = source_output_update_source_fixed_latency_cb;
1899 u->source_output->kill = source_output_kill_cb;
1900 u->source_output->attach = source_output_attach_cb;
1901 u->source_output->detach = source_output_detach_cb;
1902 u->source_output->state_change = source_output_state_change_cb;
1903 u->source_output->may_move_to = source_output_may_move_to_cb;
1904 u->source_output->moving = source_output_moving_cb;
1905 u->source_output->userdata = u;
1906
1907 u->source->output_from_master = u->source_output;
1908
1909 /* Create sink input */
1910 pa_sink_input_new_data_init(&sink_input_data);
1911 sink_input_data.driver = __FILE__;
1912 sink_input_data.module = m;
1913 pa_sink_input_new_data_set_sink(&sink_input_data, sink_master, false);
1914 sink_input_data.origin_sink = u->sink;
1915 pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_NAME, "Echo-Cancel Sink Stream");
1916 pa_proplist_sets(sink_input_data.proplist, PA_PROP_MEDIA_ROLE, "filter");
1917 pa_sink_input_new_data_set_sample_spec(&sink_input_data, &sink_ss);
1918 pa_sink_input_new_data_set_channel_map(&sink_input_data, &sink_map);
1919 sink_input_data.flags = PA_SINK_INPUT_VARIABLE_RATE;
1920
1921 pa_sink_input_new(&u->sink_input, m->core, &sink_input_data);
1922 pa_sink_input_new_data_done(&sink_input_data);
1923
1924 if (!u->sink_input)
1925 goto fail;
1926
1927 u->sink_input->parent.process_msg = sink_input_process_msg_cb;
1928 u->sink_input->pop = sink_input_pop_cb;
1929 u->sink_input->process_rewind = sink_input_process_rewind_cb;
1930 u->sink_input->update_max_rewind = sink_input_update_max_rewind_cb;
1931 u->sink_input->update_max_request = sink_input_update_max_request_cb;
1932 u->sink_input->update_sink_requested_latency = sink_input_update_sink_requested_latency_cb;
1933 u->sink_input->update_sink_latency_range = sink_input_update_sink_latency_range_cb;
1934 u->sink_input->update_sink_fixed_latency = sink_input_update_sink_fixed_latency_cb;
1935 u->sink_input->kill = sink_input_kill_cb;
1936 u->sink_input->attach = sink_input_attach_cb;
1937 u->sink_input->detach = sink_input_detach_cb;
1938 u->sink_input->state_change = sink_input_state_change_cb;
1939 u->sink_input->may_move_to = sink_input_may_move_to_cb;
1940 u->sink_input->moving = sink_input_moving_cb;
1941 if (!u->use_volume_sharing)
1942 u->sink_input->volume_changed = sink_input_volume_changed_cb;
1943 u->sink_input->mute_changed = sink_input_mute_changed_cb;
1944 u->sink_input->userdata = u;
1945
1946 u->sink->input_to_master = u->sink_input;
1947
1948 pa_sink_input_get_silence(u->sink_input, &silence);
1949
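/* The two queues below buffer the captured and the played-back audio so the
 * canceller can be fed aligned chunks of source_output_blocksize and
 * sink_blocksize bytes; the silence chunk obtained above serves as their
 * silence template. */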
1950 u->source_memblockq = pa_memblockq_new("module-echo-cancel source_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
1951 &source_output_ss, 1, 1, 0, &silence);
1952 u->sink_memblockq = pa_memblockq_new("module-echo-cancel sink_memblockq", 0, MEMBLOCKQ_MAXLENGTH, 0,
1953 &sink_ss, 0, 1, 0, &silence);
1954
1955 pa_memblock_unref(silence.memblock);
1956
1957 if (!u->source_memblockq || !u->sink_memblockq) {
1958 pa_log("Failed to create memblockq.");
1959 goto fail;
1960 }
1961
1962 if (u->adjust_time > 0 && !u->ec->params.drift_compensation)
1963 u->time_event = pa_core_rttime_new(m->core, pa_rtclock_now() + u->adjust_time, time_callback, u);
1964 else if (u->ec->params.drift_compensation) {
1965 pa_log_info("Canceller does drift compensation -- built-in compensation will be disabled");
1966 u->adjust_time = 0;
1967 /* Perform resync just once to give the canceller a leg up */
1968 pa_atomic_store(&u->request_resync, 1);
1969 }
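/* In short: unless the engine compensates drift itself, time_callback() fires
 * every u->adjust_time microseconds so the module can readjust the streams;
 * a drift-compensating engine only gets the single initial resync requested
 * above. */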
1970
1971 if (u->save_aec) {
1972 pa_log("Creating AEC files in /tmp");
1973 u->captured_file = fopen("/tmp/aec_rec.sw", "wb");
1974 if (u->captured_file == NULL)
1975 perror ("fopen failed for /tmp/aec_rec.sw");
1976 u->played_file = fopen("/tmp/aec_play.sw", "wb");
1977 if (u->played_file == NULL)
1978 perror ("fopen failed for /tmp/aec_play.sw");
1979 u->canceled_file = fopen("/tmp/aec_out.sw", "wb");
1980 if (u->canceled_file == NULL)
1981 perror ("fopen failed for /tmp/aec_out.sw");
1982 if (u->ec->params.drift_compensation) {
1983 u->drift_file = fopen("/tmp/aec_drift.txt", "w");
1984 if (u->drift_file == NULL)
1985 perror ("fopen failed for /tmp/aec_drift.txt");
1986 }
1987 }
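/* The .sw files written above are headerless raw samples in the streams'
 * negotiated sample specs; together with aec_drift.txt they can be replayed
 * offline through the ECHO_CANCEL_TEST program at the end of this file. */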
1988
1989 u->ec->msg = pa_msgobject_new(pa_echo_canceller_msg);
1990 u->ec->msg->parent.process_msg = canceller_process_msg_cb;
1991 u->ec->msg->userdata = u;
1992
1993 u->thread_info.current_volume = u->source->reference_volume;
1994
1995 pa_sink_put(u->sink);
1996 pa_source_put(u->source);
1997
1998 pa_sink_input_put(u->sink_input);
1999 pa_source_output_put(u->source_output);
2000 pa_modargs_free(ma);
2001
2002 return 0;
2003
2004 fail:
2005 if (ma)
2006 pa_modargs_free(ma);
2007
2008 pa__done(m);
2009
2010 return -1;
2011 }
2012
2013 /* Called from main context. */
2014 int pa__get_n_used(pa_module *m) {
2015 struct userdata *u;
2016
2017 pa_assert(m);
2018 pa_assert_se(u = m->userdata);
2019
2020 return pa_sink_linked_by(u->sink) + pa_source_linked_by(u->source);
2021 }
2022
2023 /* Called from main context. */
2024 void pa__done(pa_module*m) {
2025 struct userdata *u;
2026
2027 pa_assert(m);
2028
2029 if (!(u = m->userdata))
2030 return;
2031
2032 u->dead = true;
2033
2034 /* See comments in source_output_kill_cb() above regarding
2035 * destruction order! */
2036
2037 if (u->time_event)
2038 u->core->mainloop->time_free(u->time_event);
2039
2040 if (u->source_output)
2041 pa_source_output_unlink(u->source_output);
2042 if (u->sink_input)
2043 pa_sink_input_unlink(u->sink_input);
2044
2045 if (u->source)
2046 pa_source_unlink(u->source);
2047 if (u->sink)
2048 pa_sink_unlink(u->sink);
2049
2050 if (u->source_output)
2051 pa_source_output_unref(u->source_output);
2052 if (u->sink_input)
2053 pa_sink_input_unref(u->sink_input);
2054
2055 if (u->source)
2056 pa_source_unref(u->source);
2057 if (u->sink)
2058 pa_sink_unref(u->sink);
2059
2060 if (u->source_memblockq)
2061 pa_memblockq_free(u->source_memblockq);
2062 if (u->sink_memblockq)
2063 pa_memblockq_free(u->sink_memblockq);
2064
2065 if (u->ec) {
2066 if (u->ec->done)
2067 u->ec->done(u->ec);
2068
2069 pa_xfree(u->ec);
2070 }
2071
2072 if (u->asyncmsgq)
2073 pa_asyncmsgq_unref(u->asyncmsgq);
2074
2075 if (u->save_aec) {
2076 if (u->played_file)
2077 fclose(u->played_file);
2078 if (u->captured_file)
2079 fclose(u->captured_file);
2080 if (u->canceled_file)
2081 fclose(u->canceled_file);
2082 if (u->drift_file)
2083 fclose(u->drift_file);
2084 }
2085
2086 pa_xfree(u);
2087 }
2088
2089 #ifdef ECHO_CANCEL_TEST
2090 /*
2091 * Stand-alone test program for running the canceller on pre-recorded files.
2092 */
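/* A hypothetical invocation (the binary name and argument values are
 * illustrative, matching the /tmp dumps produced by save_aec=1 above; the
 * drift log is only needed when the chosen engine does drift compensation):
 *
 *     ./echo-cancel-test /tmp/aec_play.sw /tmp/aec_rec.sw /tmp/aec_out.sw \
 *             "aec_method=webrtc" /tmp/aec_drift.txt
 */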
2093 int main(int argc, char* argv[]) {
2094 struct userdata u;
2095 pa_sample_spec source_output_ss, source_ss, sink_ss;
2096 pa_channel_map source_output_map, source_map, sink_map;
2097 pa_modargs *ma = NULL;
2098 uint8_t *rdata = NULL, *pdata = NULL, *cdata = NULL;
2099 int unused PA_GCC_UNUSED;
2100 int ret = 0, i = 0;
2101 char c;
2102 float drift;
2103 uint32_t nframes;
2104
2105 if (!getenv("MAKE_CHECK"))
2106 pa_log_set_level(PA_LOG_DEBUG);
2107
2108 pa_memzero(&u, sizeof(u));
2109
2110 if (argc < 4 || argc > 7) {
2111 goto usage;
2112 }
2113
2114 u.captured_file = fopen(argv[2], "rb");
2115 if (u.captured_file == NULL) {
2116 perror ("Could not open capture file");
2117 goto fail;
2118 }
2119 u.played_file = fopen(argv[1], "rb");
2120 if (u.played_file == NULL) {
2121 perror ("Could not open play file");
2122 goto fail;
2123 }
2124 u.canceled_file = fopen(argv[3], "wb");
2125 if (u.canceled_file == NULL) {
2126 perror ("Could not open canceled file");
2127 goto fail;
2128 }
2129
2130 u.core = pa_xnew0(pa_core, 1);
2131 u.core->cpu_info.cpu_type = PA_CPU_X86;
2132 u.core->cpu_info.flags.x86 |= PA_CPU_X86_SSE;
2133
2134 if (!(ma = pa_modargs_new(argc > 4 ? argv[4] : NULL, valid_modargs))) {
2135 pa_log("Failed to parse module arguments.");
2136 goto fail;
2137 }
2138
2139 source_ss.format = PA_SAMPLE_S16LE;
2140 source_ss.rate = DEFAULT_RATE;
2141 source_ss.channels = DEFAULT_CHANNELS;
2142 pa_channel_map_init_auto(&source_map, source_ss.channels, PA_CHANNEL_MAP_DEFAULT);
2143
2144 sink_ss.format = PA_SAMPLE_S16LE;
2145 sink_ss.rate = DEFAULT_RATE;
2146 sink_ss.channels = DEFAULT_CHANNELS;
2147 pa_channel_map_init_auto(&sink_map, sink_ss.channels, PA_CHANNEL_MAP_DEFAULT);
2148
2149 if (init_common(ma, &u, &source_ss, &source_map) < 0)
2150 goto fail;
2151
2152 source_output_ss = source_ss;
2153 source_output_map = source_map;
2154
2155 if (!u.ec->init(u.core, u.ec, &source_output_ss, &source_output_map, &sink_ss, &sink_map, &source_ss, &source_map, &nframes,
2156 pa_modargs_get_value(ma, "aec_args", NULL))) {
2157 pa_log("Failed to init AEC engine");
2158 goto fail;
2159 }
2160 u.source_output_blocksize = nframes * pa_frame_size(&source_output_ss);
2161 u.source_blocksize = nframes * pa_frame_size(&source_ss);
2162 u.sink_blocksize = nframes * pa_frame_size(&sink_ss);
2163
2164 if (u.ec->params.drift_compensation) {
2165 if (argc < 6) {
2166 pa_log("Drift compensation enabled but drift file not specified");
2167 goto fail;
2168 }
2169
2170 u.drift_file = fopen(argv[5], "rt");
2171
2172 if (u.drift_file == NULL) {
2173 perror ("Could not open drift file");
2174 goto fail;
2175 }
2176 }
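/* The drift log parsed below is a plain-text event stream; with illustrative
 * values it might look like
 *
 *     d 1.000123
 *     p 640
 *     c 320
 *
 * where 'd' installs a new drift factor and 'p'/'c' give the number of bytes
 * to pull from the played/captured dumps before the next engine call. */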
2177
2178 rdata = pa_xmalloc(u.source_output_blocksize);
2179 pdata = pa_xmalloc(u.sink_blocksize);
2180 cdata = pa_xmalloc(u.source_blocksize);
2181
2182 if (!u.ec->params.drift_compensation) {
2183 while (fread(rdata, u.source_output_blocksize, 1, u.captured_file) > 0) {
2184 if (fread(pdata, u.sink_blocksize, 1, u.played_file) == 0) {
2185 perror("Played file ended before captured file");
2186 goto fail;
2187 }
2188
2189 u.ec->run(u.ec, rdata, pdata, cdata);
2190
2191 unused = fwrite(cdata, u.source_blocksize, 1, u.canceled_file);
2192 }
2193 } else {
2194 while (fscanf(u.drift_file, "%c", &c) > 0) {
2195 switch (c) {
2196 case 'd':
2197 if (fscanf(u.drift_file, "%a", &drift) != 1) {
2198 perror("Drift file incomplete");
2199 goto fail;
2200 }
2201
2202 u.ec->set_drift(u.ec, drift);
2203
2204 break;
2205
2206 case 'c':
2207 if (fscanf(u.drift_file, "%d", &i) != 1) {
2208 perror("Drift file incomplete");
2209 goto fail;
2210 }
2211
2212 if (fread(rdata, i, 1, u.captured_file) <= 0) {
2213 perror("Captured file ended prematurely");
2214 goto fail;
2215 }
2216
2217 u.ec->record(u.ec, rdata, cdata);
2218
2219 unused = fwrite(cdata, i, 1, u.canceled_file);
2220
2221 break;
2222
2223 case 'p':
2224 if (fscanf(u.drift_file, "%d", &i) != 1) {
2225 perror("Drift file incomplete");
2226 goto fail;
2227 }
2228
2229 if (fread(pdata, i, 1, u.played_file) <= 0) {
2230 perror("Played file ended prematurely");
2231 goto fail;
2232 }
2233
2234 u.ec->play(u.ec, pdata);
2235
2236 break;
2237 }
2238 }
2239
2240 if (fread(rdata, i, 1, u.captured_file) > 0)
2241 pa_log("All capture data was not consumed");
2242 if (fread(pdata, i, 1, u.played_file) > 0)
2243 pa_log("All playback data was not consumed");
2244 }
2245
2246 u.ec->done(u.ec);
2247
2248 out:
2249 if (u.captured_file)
2250 fclose(u.captured_file);
2251 if (u.played_file)
2252 fclose(u.played_file);
2253 if (u.canceled_file)
2254 fclose(u.canceled_file);
2255 if (u.drift_file)
2256 fclose(u.drift_file);
2257
2258 pa_xfree(rdata);
2259 pa_xfree(pdata);
2260 pa_xfree(cdata);
2261
2262 pa_xfree(u.ec);
2263 pa_xfree(u.core);
2264
2265 if (ma)
2266 pa_modargs_free(ma);
2267
2268 return ret;
2269
2270 usage:
2271 pa_log("Usage: %s play_file rec_file out_file [module args] [drift_file]", argv[0]);
2272
2273 fail:
2274 ret = -1;
2275 goto out;
2276 }
2277 #endif /* ECHO_CANCEL_TEST */