Whitespace cleanup: Remove all multiple newlines

diff --git a/src/modules/echo-cancel/webrtc.cc b/src/modules/echo-cancel/webrtc.cc
index c53e96303b96b66a72a63c6896513b68dc591430..697e0baeff156cdf2be0900312daed4136914249 100644
--- a/src/modules/echo-cancel/webrtc.cc
+++ b/src/modules/echo-cancel/webrtc.cc
@@ -42,11 +42,12 @@ PA_C_DECL_END
 
 #define DEFAULT_HIGH_PASS_FILTER TRUE
 #define DEFAULT_NOISE_SUPPRESSION TRUE
-#define DEFAULT_ANALOG_GAIN_CONTROL FALSE
-#define DEFAULT_DIGITAL_GAIN_CONTROL TRUE
+#define DEFAULT_ANALOG_GAIN_CONTROL TRUE
+#define DEFAULT_DIGITAL_GAIN_CONTROL FALSE
 #define DEFAULT_MOBILE FALSE
 #define DEFAULT_ROUTING_MODE "speakerphone"
 #define DEFAULT_COMFORT_NOISE TRUE
+#define DEFAULT_DRIFT_COMPENSATION FALSE
 
 static const char* const valid_modargs[] = {
     "high_pass_filter",
@@ -56,6 +57,7 @@ static const char* const valid_modargs[] = {
     "mobile",
     "routing_mode",
     "comfort_noise",
+    "drift_compensation",
     NULL
 };
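
These switches are not module arguments in their own right; they arrive in the args string passed to pa_webrtc_ec_init() below, which module-echo-cancel assembles from its aec_args setting. A purely illustrative load line exercising the new drift_compensation option might look like this (the module argument names belong to module-echo-cancel, not to this file, so treat it as a sketch):

    load-module module-echo-cancel aec_method=webrtc aec_args="analog_gain_control=1 drift_compensation=1"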
 
@@ -75,13 +77,13 @@ static int routing_mode_from_string(const char *rmode) {
 }
 
 pa_bool_t pa_webrtc_ec_init(pa_core *c, pa_echo_canceller *ec,
-                            pa_sample_spec *source_ss, pa_channel_map *source_map,
-                            pa_sample_spec *sink_ss, pa_channel_map *sink_map,
-                            uint32_t *blocksize, const char *args)
-{
+                            pa_sample_spec *rec_ss, pa_channel_map *rec_map,
+                            pa_sample_spec *play_ss, pa_channel_map *play_map,
+                            pa_sample_spec *out_ss, pa_channel_map *out_map,
+                            uint32_t *nframes, const char *args) {
     webrtc::AudioProcessing *apm = NULL;
     pa_bool_t hpf, ns, agc, dgc, mobile, cn;
-    int rm;
+    int rm = -1;
     pa_modargs *ma;
 
     if (!(ma = pa_modargs_new(args, valid_modargs))) {
@@ -89,7 +91,6 @@ pa_bool_t pa_webrtc_ec_init(pa_core *c, pa_echo_canceller *ec,
         goto fail;
     }
 
-
     hpf = DEFAULT_HIGH_PASS_FILTER;
     if (pa_modargs_get_value_boolean(ma, "high_pass_filter", &hpf) < 0) {
         pa_log("Failed to parse high_pass_filter value");
@@ -108,8 +109,8 @@ pa_bool_t pa_webrtc_ec_init(pa_core *c, pa_echo_canceller *ec,
         goto fail;
     }
 
-    dgc = DEFAULT_DIGITAL_GAIN_CONTROL;
-    if (pa_modargs_get_value_boolean(ma, "analog_gain_control", &dgc) < 0) {
+    dgc = agc ? FALSE : DEFAULT_DIGITAL_GAIN_CONTROL;
+    if (pa_modargs_get_value_boolean(ma, "digital_gain_control", &dgc) < 0) {
         pa_log("Failed to parse digital_gain_control value");
         goto fail;
     }
@@ -125,7 +126,18 @@ pa_bool_t pa_webrtc_ec_init(pa_core *c, pa_echo_canceller *ec,
         goto fail;
     }
 
+    ec->params.drift_compensation = DEFAULT_DRIFT_COMPENSATION;
+    if (pa_modargs_get_value_boolean(ma, "drift_compensation", &ec->params.drift_compensation) < 0) {
+        pa_log("Failed to parse drift_compensation value");
+        goto fail;
+    }
+
     if (mobile) {
+        if (ec->params.drift_compensation) {
+            pa_log("Can't use drift_compensation in mobile mode");
+            goto fail;
+        }
+
         if ((rm = routing_mode_from_string(pa_modargs_get_value(ma, "routing_mode", DEFAULT_ROUTING_MODE))) < 0) {
             pa_log("Failed to parse routing_mode value");
             goto fail;
@@ -145,22 +157,30 @@ pa_bool_t pa_webrtc_ec_init(pa_core *c, pa_echo_canceller *ec,
 
     apm = webrtc::AudioProcessing::Create(0);
 
-    source_ss->format = PA_SAMPLE_S16NE;
-    *sink_ss = *source_ss;
+    out_ss->format = PA_SAMPLE_S16NE;
+    *play_ss = *out_ss;
     /* FIXME: the implementation actually allows a different number of
      * source/sink channels. Do we want to support that? */
-    *sink_map = *source_map;
+    *play_map = *out_map;
+    *rec_ss = *out_ss;
+    *rec_map = *out_map;
 
-    apm->set_sample_rate_hz(source_ss->rate);
+    apm->set_sample_rate_hz(out_ss->rate);
 
-    apm->set_num_channels(source_ss->channels, source_ss->channels);
-    apm->set_num_reverse_channels(sink_ss->channels);
+    apm->set_num_channels(out_ss->channels, out_ss->channels);
+    apm->set_num_reverse_channels(play_ss->channels);
 
     if (hpf)
         apm->high_pass_filter()->Enable(true);
 
     if (!mobile) {
-        apm->echo_cancellation()->enable_drift_compensation(false);
+        if (ec->params.drift_compensation) {
+            apm->echo_cancellation()->set_device_sample_rate_hz(out_ss->rate);
+            apm->echo_cancellation()->enable_drift_compensation(true);
+        } else {
+            apm->echo_cancellation()->enable_drift_compensation(false);
+        }
+
         apm->echo_cancellation()->Enable(true);
     } else {
         apm->echo_control_mobile()->set_routing_mode(static_cast<webrtc::EchoControlMobile::RoutingMode>(rm));
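
A note on the drift-compensation branch added above: with this generation of the WebRTC AudioProcessing API, enabling drift compensation obliges the caller to declare the device sample rate once (the set_device_sample_rate_hz() call here) and to report the measured playback/capture drift before every ProcessStream() invocation; the new pa_webrtc_ec_set_drift() further down in this patch is what supplies that per-block figure.
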
@@ -174,23 +194,31 @@ pa_bool_t pa_webrtc_ec_init(pa_core *c, pa_echo_canceller *ec,
     }
 
     if (agc || dgc) {
-        if (mobile && rm <= webrtc::EchoControlMobile::kEarpiece)
+        if (mobile && rm <= webrtc::EchoControlMobile::kEarpiece) {
             /* Maybe this should be a knob, but we've got a lot of knobs already */
             apm->gain_control()->set_mode(webrtc::GainControl::kFixedDigital);
-        else if (dgc)
-            apm->gain_control()->set_mode(webrtc::GainControl::kAdaptiveDigital);
-        else {
-            /* FIXME: Hook up for analog AGC */
-            pa_log("Analog gain control isn't implemented yet -- using ditital gain control.");
+            ec->params.priv.webrtc.agc = FALSE;
+        } else if (dgc) {
             apm->gain_control()->set_mode(webrtc::GainControl::kAdaptiveDigital);
+            ec->params.priv.webrtc.agc = FALSE;
+        } else {
+            apm->gain_control()->set_mode(webrtc::GainControl::kAdaptiveAnalog);
+            if (apm->gain_control()->set_analog_level_limits(0, PA_VOLUME_NORM-1) != apm->kNoError) {
+                pa_log("Failed to initialise AGC");
+                goto fail;
+            }
+            ec->params.priv.webrtc.agc = TRUE;
         }
+
+        apm->gain_control()->Enable(true);
     }
 
     apm->voice_detection()->Enable(true);
 
     ec->params.priv.webrtc.apm = apm;
-    ec->params.priv.webrtc.sample_spec = *source_ss;
-    ec->params.priv.webrtc.blocksize = *blocksize = (uint64_t)pa_bytes_per_second(source_ss) * BLOCK_SIZE_US / PA_USEC_PER_SEC;
+    ec->params.priv.webrtc.sample_spec = *out_ss;
+    ec->params.priv.webrtc.blocksize = (uint64_t)pa_bytes_per_second(out_ss) * BLOCK_SIZE_US / PA_USEC_PER_SEC;
+    *nframes = ec->params.priv.webrtc.blocksize / pa_frame_size(out_ss);
 
     pa_modargs_free(ma);
     return TRUE;
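
The hunk above keeps blocksize in bytes for internal use while handing the caller a frame count through *nframes. A rough worked example, assuming BLOCK_SIZE_US is the usual 10 ms WebRTC processing interval defined earlier in this file:

    /* Hypothetical figures, assuming BLOCK_SIZE_US == 10000 (10 ms):
     * out_ss: 32000 Hz, 1 channel, PA_SAMPLE_S16NE -> pa_frame_size() == 2 bytes
     * pa_bytes_per_second() == 32000 * 2           == 64000
     * blocksize             == 64000 * 10000 / 1e6 == 640 bytes
     * *nframes              == 640 / 2             == 320 frames per 10 ms block
     */
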
@@ -204,9 +232,9 @@ fail:
     return FALSE;
 }
 
-void pa_webrtc_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out) {
+void pa_webrtc_ec_play(pa_echo_canceller *ec, const uint8_t *play) {
     webrtc::AudioProcessing *apm = (webrtc::AudioProcessing*)ec->params.priv.webrtc.apm;
-    webrtc::AudioFrame play_frame, out_frame;
+    webrtc::AudioFrame play_frame;
     const pa_sample_spec *ss = &ec->params.priv.webrtc.sample_spec;
 
     play_frame._audioChannel = ss->channels;
@@ -214,18 +242,49 @@ void pa_webrtc_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *
     play_frame._payloadDataLengthInSamples = ec->params.priv.webrtc.blocksize / pa_frame_size(ss);
     memcpy(play_frame._payloadData, play, ec->params.priv.webrtc.blocksize);
 
+    apm->AnalyzeReverseStream(&play_frame);
+}
+
+void pa_webrtc_ec_record(pa_echo_canceller *ec, const uint8_t *rec, uint8_t *out) {
+    webrtc::AudioProcessing *apm = (webrtc::AudioProcessing*)ec->params.priv.webrtc.apm;
+    webrtc::AudioFrame out_frame;
+    const pa_sample_spec *ss = &ec->params.priv.webrtc.sample_spec;
+    pa_cvolume v;
+
     out_frame._audioChannel = ss->channels;
     out_frame._frequencyInHz = ss->rate;
     out_frame._payloadDataLengthInSamples = ec->params.priv.webrtc.blocksize / pa_frame_size(ss);
     memcpy(out_frame._payloadData, rec, ec->params.priv.webrtc.blocksize);
 
-    apm->AnalyzeReverseStream(&play_frame);
+    if (ec->params.priv.webrtc.agc) {
+        pa_cvolume_init(&v);
+        pa_echo_canceller_get_capture_volume(ec, &v);
+        apm->gain_control()->set_stream_analog_level(pa_cvolume_avg(&v));
+    }
+
     apm->set_stream_delay_ms(0);
     apm->ProcessStream(&out_frame);
 
+    if (ec->params.priv.webrtc.agc) {
+        pa_cvolume_set(&v, ss->channels, apm->gain_control()->stream_analog_level());
+        pa_echo_canceller_set_capture_volume(ec, &v);
+    }
+
     memcpy(out, out_frame._payloadData, ec->params.priv.webrtc.blocksize);
 }
 
+void pa_webrtc_ec_set_drift(pa_echo_canceller *ec, float drift) {
+    webrtc::AudioProcessing *apm = (webrtc::AudioProcessing*)ec->params.priv.webrtc.apm;
+    const pa_sample_spec *ss = &ec->params.priv.webrtc.sample_spec;
+
+    apm->echo_cancellation()->set_stream_drift_samples(drift * ec->params.priv.webrtc.blocksize / pa_frame_size(ss));
+}
+
+void pa_webrtc_ec_run(pa_echo_canceller *ec, const uint8_t *rec, const uint8_t *play, uint8_t *out) {
+    pa_webrtc_ec_play(ec, play);
+    pa_webrtc_ec_record(ec, rec, out);
+}
+
 void pa_webrtc_ec_done(pa_echo_canceller *ec) {
     if (ec->params.priv.webrtc.apm) {
         webrtc::AudioProcessing::Destroy((webrtc::AudioProcessing*)ec->params.priv.webrtc.apm);
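
For context, here is a minimal sketch of how a caller such as module-echo-cancel could drive the split API introduced in this patch when drift compensation is active. The buffer names and the drift estimate are placeholders, not taken from the module source; only the pa_webrtc_ec_* calls and their ordering reflect this file. When drift compensation is off, the retained pa_webrtc_ec_run() wrapper still covers the simple case.

    /* Hypothetical per-block driver (assumes the declarations from echo-cancel.h).
     * play_buf, rec_buf and out_buf each hold exactly one canceller block;
     * estimated_drift is whatever playback/capture drift the caller has measured. */
    static void process_one_block(pa_echo_canceller *ec,
                                  const uint8_t *play_buf, const uint8_t *rec_buf,
                                  uint8_t *out_buf, float estimated_drift) {
        pa_webrtc_ec_play(ec, play_buf);             /* queue the reverse (playback) block */
        pa_webrtc_ec_set_drift(ec, estimated_drift); /* report drift before processing */
        pa_webrtc_ec_record(ec, rec_buf, out_buf);   /* cancel echo in the captured block */
    }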