Sync to WebRTC ToT

Update to latest lkgr commit
https://webrtc.googlesource.com/src/+/0cc11b4b947e0ceae14e717aa25ceffc480529a3

This is to include recent important changes of AEC3:

- https://webrtc-review.googlesource.com/c/src/+/112136
- https://webrtc-review.googlesource.com/c/src/+/111602
- https://webrtc-review.googlesource.com/c/src/+/109400

Note that there are some apm/aec config keys modified in this change.
Existing users should fix them separately.

BUG=None
TEST=emerge-nocturne webrtc-apm

Change-Id: Ib5517526db1a01dffa673d8ab0dc828ceca73535
Reviewed-on: https://chromium-review.googlesource.com/1354142
Commit-Ready: ChromeOS CL Exonerator Bot <chromiumos-cl-exonerator@appspot.gserviceaccount.com>
Tested-by: Hsinyu Chao <hychao@chromium.org>
Reviewed-by: Cheng-Yi Chiang <cychiang@chromium.org>
Reviewed-by: Per Åhgren <peah@chromium.org>
diff --git a/api/audio/echo_canceller3_config.cc b/api/audio/echo_canceller3_config.cc
index 29d0b9a..3eb2a8d 100644
--- a/api/audio/echo_canceller3_config.cc
+++ b/api/audio/echo_canceller3_config.cc
@@ -148,11 +148,12 @@
     c->erle.min = std::min(c->erle.max_l, c->erle.max_h);
     res = false;
   }
+  res = res & Limit(&c->erle.num_sections, 1, c->filter.main.length_blocks);
 
   res = res & Limit(&c->ep_strength.lf, 0.f, 1000000.f);
   res = res & Limit(&c->ep_strength.mf, 0.f, 1000000.f);
   res = res & Limit(&c->ep_strength.hf, 0.f, 1000000.f);
-  res = res & Limit(&c->ep_strength.default_len, 0.f, 1.f);
+  res = res & Limit(&c->ep_strength.default_len, -1.f, 1.f);
 
   res =
       res & Limit(&c->echo_audibility.low_render_limit, 0.f, 32768.f * 32768.f);
@@ -243,6 +244,12 @@
 
   res = res & Limit(&c->suppressor.floor_first_increase, 0.f, 1000000.f);
 
+  if (c->delay.delay_headroom_blocks >
+      c->filter.main_initial.length_blocks - 1) {
+    c->delay.delay_headroom_blocks = c->filter.main_initial.length_blocks - 1;
+    res = false;
+  }
+
   return res;
 }
 }  // namespace webrtc
diff --git a/api/audio/echo_canceller3_config.h b/api/audio/echo_canceller3_config.h
index 251f282..ffe17f2 100644
--- a/api/audio/echo_canceller3_config.h
+++ b/api/audio/echo_canceller3_config.h
@@ -87,6 +87,7 @@
     float max_l = 4.f;
     float max_h = 1.5f;
     bool onset_detection = true;
+    size_t num_sections = 1;
   } erle;
 
   struct EpStrength {
@@ -106,8 +107,8 @@
     float audibility_threshold_lf = 10;
     float audibility_threshold_mf = 10;
     float audibility_threshold_hf = 10;
-    bool use_stationary_properties = true;
-    bool use_stationarity_properties_at_init = true;
+    bool use_stationary_properties = false;
+    bool use_stationarity_properties_at_init = false;
   } echo_audibility;
 
   struct RenderLevels {
@@ -181,8 +182,8 @@
                                    0.25f);
 
     struct DominantNearendDetection {
-      float enr_threshold = 4.f;
-      float enr_exit_threshold = .1f;
+      float enr_threshold = .25f;
+      float enr_exit_threshold = 10.f;
       float snr_threshold = 30.f;
       int hold_duration = 50;
       int trigger_threshold = 12;
diff --git a/api/audio/echo_canceller3_config_json.cc b/api/audio/echo_canceller3_config_json.cc
index d039c8b..01a831c 100644
--- a/api/audio/echo_canceller3_config_json.cc
+++ b/api/audio/echo_canceller3_config_json.cc
@@ -197,6 +197,7 @@
     ReadParam(section, "max_l", &cfg.erle.max_l);
     ReadParam(section, "max_h", &cfg.erle.max_h);
     ReadParam(section, "onset_detection", &cfg.erle.onset_detection);
+    ReadParam(section, "num_sections", &cfg.erle.num_sections);
   }
 
   if (rtc::GetValueFromJsonObject(aec3_root, "ep_strength", &section)) {
@@ -425,7 +426,8 @@
   ost << "\"max_l\": " << config.erle.max_l << ",";
   ost << "\"max_h\": " << config.erle.max_h << ",";
   ost << "\"onset_detection\": "
-      << (config.erle.onset_detection ? "true" : "false");
+      << (config.erle.onset_detection ? "true" : "false") << ",";
+  ost << "\"num_sections\": " << config.erle.num_sections;
   ost << "},";
 
   ost << "\"ep_strength\": {";
diff --git a/api/audio_options.cc b/api/audio_options.cc
index d464118..e33214b 100644
--- a/api/audio_options.cc
+++ b/api/audio_options.cc
@@ -49,6 +49,8 @@
           change.audio_jitter_buffer_max_packets);
   SetFrom(&audio_jitter_buffer_fast_accelerate,
           change.audio_jitter_buffer_fast_accelerate);
+  SetFrom(&audio_jitter_buffer_min_delay_ms,
+          change.audio_jitter_buffer_min_delay_ms);
   SetFrom(&typing_detection, change.typing_detection);
   SetFrom(&experimental_agc, change.experimental_agc);
   SetFrom(&extended_filter_aec, change.extended_filter_aec);
@@ -76,6 +78,8 @@
          audio_jitter_buffer_max_packets == o.audio_jitter_buffer_max_packets &&
          audio_jitter_buffer_fast_accelerate ==
              o.audio_jitter_buffer_fast_accelerate &&
+         audio_jitter_buffer_min_delay_ms ==
+             o.audio_jitter_buffer_min_delay_ms &&
          typing_detection == o.typing_detection &&
          experimental_agc == o.experimental_agc &&
          extended_filter_aec == o.extended_filter_aec &&
@@ -107,6 +111,8 @@
                 audio_jitter_buffer_max_packets);
   ToStringIfSet(&result, "audio_jitter_buffer_fast_accelerate",
                 audio_jitter_buffer_fast_accelerate);
+  ToStringIfSet(&result, "audio_jitter_buffer_min_delay_ms",
+                audio_jitter_buffer_min_delay_ms);
   ToStringIfSet(&result, "typing", typing_detection);
   ToStringIfSet(&result, "experimental_agc", experimental_agc);
   ToStringIfSet(&result, "extended_filter_aec", extended_filter_aec);
diff --git a/api/audio_options.h b/api/audio_options.h
index 8ae8319..c2d1f44 100644
--- a/api/audio_options.h
+++ b/api/audio_options.h
@@ -54,6 +54,8 @@
   absl::optional<int> audio_jitter_buffer_max_packets;
   // Audio receiver jitter buffer (NetEq) fast accelerate mode.
   absl::optional<bool> audio_jitter_buffer_fast_accelerate;
+  // Audio receiver jitter buffer (NetEq) minimum target delay in milliseconds.
+  absl::optional<int> audio_jitter_buffer_min_delay_ms;
   // Audio processing to detect typing.
   absl::optional<bool> typing_detection;
   absl::optional<bool> experimental_agc;
diff --git a/api/create_peerconnection_factory.cc b/api/create_peerconnection_factory.cc
new file mode 100644
index 0000000..1a6d086
--- /dev/null
+++ b/api/create_peerconnection_factory.cc
@@ -0,0 +1,182 @@
+/*
+ *  Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/create_peerconnection_factory.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/call/callfactoryinterface.h"
+#include "api/peerconnectioninterface.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "logging/rtc_event_log/rtc_event_log_factory.h"
+#include "logging/rtc_event_log/rtc_event_log_factory_interface.h"
+#include "media/engine/webrtcmediaengine.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/bind.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+
+#if defined(USE_BUILTIN_SW_CODECS)
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory) {
+  return CreatePeerConnectionFactoryWithAudioMixer(
+      nullptr /*network_thread*/, nullptr /*worker_thread*/,
+      nullptr /*signaling_thread*/, nullptr /*default_adm*/,
+      audio_encoder_factory, audio_decoder_factory,
+      nullptr /*video_encoder_factory*/, nullptr /*video_decoder_factory*/,
+      nullptr /*audio_mixer*/);
+}
+
+// Note: all the other CreatePeerConnectionFactory variants just end up calling
+// this, ultimately.
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing) {
+  rtc::scoped_refptr<AudioProcessing> audio_processing_use = audio_processing;
+  if (!audio_processing_use) {
+    audio_processing_use = AudioProcessingBuilder().Create();
+  }
+
+  std::unique_ptr<cricket::MediaEngineInterface> media_engine(
+      cricket::WebRtcMediaEngineFactory::Create(
+          default_adm, audio_encoder_factory, audio_decoder_factory,
+          video_encoder_factory, video_decoder_factory, audio_mixer,
+          audio_processing_use));
+
+  std::unique_ptr<CallFactoryInterface> call_factory = CreateCallFactory();
+
+  std::unique_ptr<RtcEventLogFactoryInterface> event_log_factory =
+      CreateRtcEventLogFactory();
+
+  return CreateModularPeerConnectionFactory(
+      network_thread, worker_thread, signaling_thread, std::move(media_engine),
+      std::move(call_factory), std::move(event_log_factory));
+}
+
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing,
+    std::unique_ptr<FecControllerFactoryInterface> fec_controller_factory,
+    std::unique_ptr<NetworkControllerFactoryInterface>
+        network_controller_factory) {
+  rtc::scoped_refptr<AudioProcessing> audio_processing_use = audio_processing;
+  if (!audio_processing_use) {
+    audio_processing_use = AudioProcessingBuilder().Create();
+  }
+
+  std::unique_ptr<cricket::MediaEngineInterface> media_engine(
+      cricket::WebRtcMediaEngineFactory::Create(
+          default_adm, audio_encoder_factory, audio_decoder_factory,
+          video_encoder_factory, video_decoder_factory, audio_mixer,
+          audio_processing_use));
+
+  std::unique_ptr<CallFactoryInterface> call_factory = CreateCallFactory();
+
+  std::unique_ptr<RtcEventLogFactoryInterface> event_log_factory =
+      CreateRtcEventLogFactory();
+
+  return CreateModularPeerConnectionFactory(
+      network_thread, worker_thread, signaling_thread, std::move(media_engine),
+      std::move(call_factory), std::move(event_log_factory),
+      std::move(fec_controller_factory), std::move(network_controller_factory));
+}
+#endif
+
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    rtc::scoped_refptr<AudioDeviceModule> default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    std::unique_ptr<VideoEncoderFactory> video_encoder_factory,
+    std::unique_ptr<VideoDecoderFactory> video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing) {
+  if (!audio_processing)
+    audio_processing = AudioProcessingBuilder().Create();
+
+  std::unique_ptr<cricket::MediaEngineInterface> media_engine =
+      cricket::WebRtcMediaEngineFactory::Create(
+          default_adm, audio_encoder_factory, audio_decoder_factory,
+          std::move(video_encoder_factory), std::move(video_decoder_factory),
+          audio_mixer, audio_processing);
+
+  std::unique_ptr<CallFactoryInterface> call_factory = CreateCallFactory();
+
+  std::unique_ptr<RtcEventLogFactoryInterface> event_log_factory =
+      CreateRtcEventLogFactory();
+  PeerConnectionFactoryDependencies dependencies;
+  dependencies.network_thread = network_thread;
+  dependencies.worker_thread = worker_thread;
+  dependencies.signaling_thread = signaling_thread;
+  dependencies.media_engine = std::move(media_engine);
+  dependencies.call_factory = std::move(call_factory);
+  dependencies.event_log_factory = std::move(event_log_factory);
+  return CreateModularPeerConnectionFactory(std::move(dependencies));
+}
+
+#if defined(USE_BUILTIN_SW_CODECS)
+rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactoryWithAudioMixer(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer) {
+  return CreatePeerConnectionFactory(
+      network_thread, worker_thread, signaling_thread, default_adm,
+      audio_encoder_factory, audio_decoder_factory, video_encoder_factory,
+      video_decoder_factory, audio_mixer, nullptr);
+}
+
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory) {
+  return CreatePeerConnectionFactoryWithAudioMixer(
+      network_thread, worker_thread, signaling_thread, default_adm,
+      audio_encoder_factory, audio_decoder_factory, video_encoder_factory,
+      video_decoder_factory, nullptr);
+}
+#endif
+
+}  // namespace webrtc
diff --git a/api/create_peerconnection_factory.h b/api/create_peerconnection_factory.h
new file mode 100644
index 0000000..baa50c7
--- /dev/null
+++ b/api/create_peerconnection_factory.h
@@ -0,0 +1,179 @@
+/*
+ *  Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_CREATE_PEERCONNECTION_FACTORY_H_
+#define API_CREATE_PEERCONNECTION_FACTORY_H_
+
+#include <memory>
+
+#include "api/audio/audio_mixer.h"
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/audio_codecs/audio_encoder_factory.h"
+#include "api/fec_controller.h"
+#include "api/peerconnectioninterface.h"
+#include "api/transport/network_control.h"
+#include "rtc_base/scoped_ref_ptr.h"
+
+namespace rtc {
+// TODO(bugs.webrtc.org/9987): Move rtc::Thread to api/ or expose a better
+// type. At the moment, rtc::Thread is not part of api/ so it cannot be
+// included in order to avoid to leak internal types.
+class Thread;
+}  // namespace rtc
+
+namespace cricket {
+class WebRtcVideoDecoderFactory;
+class WebRtcVideoEncoderFactory;
+}  // namespace cricket
+
+namespace webrtc {
+
+class AudioDeviceModule;
+class AudioProcessing;
+
+#if defined(USE_BUILTIN_SW_CODECS)
+// Create a new instance of PeerConnectionFactoryInterface.
+//
+// This method relies on the thread it's called on as the "signaling thread"
+// for the PeerConnectionFactory it creates.
+//
+// As such, if the current thread is not already running an rtc::Thread message
+// loop, an application using this method must eventually either call
+// rtc::Thread::Current()->Run(), or call
+// rtc::Thread::Current()->ProcessMessages() within the application's own
+// message loop.
+RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory);
+
+// Create a new instance of PeerConnectionFactoryInterface.
+//
+// |network_thread|, |worker_thread| and |signaling_thread| are
+// the only mandatory parameters.
+//
+// If non-null, a reference is added to |default_adm|, and ownership of
+// |video_encoder_factory| and |video_decoder_factory| is transferred to the
+// returned factory.
+// TODO(deadbeef): Use rtc::scoped_refptr<> and std::unique_ptr<> to make this
+// ownership transfer and ref counting more obvious.
+RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory);
+
+// Create a new instance of PeerConnectionFactoryInterface with optional
+// external audio mixed and audio processing modules.
+//
+// If |audio_mixer| is null, an internal audio mixer will be created and used.
+// If |audio_processing| is null, an internal audio processing module will be
+// created and used.
+RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing);
+
+// Create a new instance of PeerConnectionFactoryInterface with optional
+// external audio mixer, audio processing, and fec controller modules.
+//
+// If |audio_mixer| is null, an internal audio mixer will be created and used.
+// If |audio_processing| is null, an internal audio processing module will be
+// created and used.
+// If |fec_controller_factory| is null, an internal fec controller module will
+// be created and used.
+// If |network_controller_factory| is provided, it will be used if enabled via
+// field trial.
+RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing,
+    std::unique_ptr<FecControllerFactoryInterface> fec_controller_factory,
+    std::unique_ptr<NetworkControllerFactoryInterface>
+        network_controller_factory = nullptr);
+#endif  // defined(USE_BUILTIN_SW_CODECS)
+
+// Create a new instance of PeerConnectionFactoryInterface with optional video
+// codec factories. These video factories represents all video codecs, i.e. no
+// extra internal video codecs will be added.
+// When building WebRTC with rtc_use_builtin_sw_codecs = false, this is the
+// only available CreatePeerConnectionFactory overload.
+RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    rtc::scoped_refptr<AudioDeviceModule> default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    std::unique_ptr<VideoEncoderFactory> video_encoder_factory,
+    std::unique_ptr<VideoDecoderFactory> video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing);
+
+#if defined(USE_BUILTIN_SW_CODECS)
+// Create a new instance of PeerConnectionFactoryInterface with external audio
+// mixer.
+//
+// If |audio_mixer| is null, an internal audio mixer will be created and used.
+RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactoryWithAudioMixer(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer);
+
+// Create a new instance of PeerConnectionFactoryInterface.
+// Same thread is used as worker and network thread.
+RTC_EXPORT inline rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+    rtc::Thread* worker_and_network_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory) {
+  return CreatePeerConnectionFactory(
+      worker_and_network_thread, worker_and_network_thread, signaling_thread,
+      default_adm, audio_encoder_factory, audio_decoder_factory,
+      video_encoder_factory, video_decoder_factory);
+}
+#endif  // defined(USE_BUILTIN_SW_CODECS)
+
+}  // namespace webrtc
+
+#endif  // API_CREATE_PEERCONNECTION_FACTORY_H_
diff --git a/api/media_transport_interface.cc b/api/media_transport_interface.cc
index 039a4a1..ef223aa 100644
--- a/api/media_transport_interface.cc
+++ b/api/media_transport_interface.cc
@@ -114,9 +114,6 @@
 void MediaTransportInterface::RemoveTargetTransferRateObserver(
     webrtc::TargetTransferRateObserver* observer) {}
 
-void MediaTransportInterface::SetTargetTransferRateObserver(
-    webrtc::TargetTransferRateObserver* observer) {}
-
 void MediaTransportInterface::AddTargetTransferRateObserver(
     webrtc::TargetTransferRateObserver* observer) {}
 
diff --git a/api/media_transport_interface.h b/api/media_transport_interface.h
index 7570160..b10dd63 100644
--- a/api/media_transport_interface.h
+++ b/api/media_transport_interface.h
@@ -27,7 +27,6 @@
 #include "api/array_view.h"
 #include "api/rtcerror.h"
 #include "api/video/encoded_image.h"
-#include "common_types.h"  // NOLINT(build/include)
 #include "rtc_base/copyonwritebuffer.h"
 #include "rtc_base/networkroute.h"
 
@@ -336,15 +335,6 @@
   // pass a nullptr.
   virtual void SetReceiveVideoSink(MediaTransportVideoSinkInterface* sink) = 0;
 
-  // Sets a target bitrate observer. Before media transport is destructed
-  // the observer must be unregistered (set to nullptr).
-  // A newly registered observer will be called back with the latest recorded
-  // target rate, if available.
-  // TODO(psla): This method will be removed, in favor of
-  // AddTargetTransferRateObserver.
-  virtual void SetTargetTransferRateObserver(
-      TargetTransferRateObserver* observer);
-
   // Adds a target bitrate observer. Before media transport is destructed
   // the observer must be unregistered (by calling
   // RemoveTargetTransferRateObserver).
diff --git a/api/mediastreaminterface.cc b/api/mediastreaminterface.cc
index e36d5cb..955e7e4 100644
--- a/api/mediastreaminterface.cc
+++ b/api/mediastreaminterface.cc
@@ -30,4 +30,8 @@
   return nullptr;
 }
 
+const cricket::AudioOptions AudioSourceInterface::options() const {
+  return {};
+}
+
 }  // namespace webrtc
diff --git a/api/mediastreaminterface.h b/api/mediastreaminterface.h
index 30f8f71..6d96766 100644
--- a/api/mediastreaminterface.h
+++ b/api/mediastreaminterface.h
@@ -23,6 +23,7 @@
 #include <vector>
 
 #include "absl/types/optional.h"
+#include "api/audio_options.h"
 #include "api/video/video_frame.h"
 #include "api/video/video_sink_interface.h"
 #include "api/video/video_source_interface.h"
@@ -207,6 +208,11 @@
   // TODO(tommi): Make pure virtual.
   virtual void AddSink(AudioTrackSinkInterface* sink) {}
   virtual void RemoveSink(AudioTrackSinkInterface* sink) {}
+
+  // Returns options for the AudioSource.
+  // (for some of the settings this approach is broken, e.g. setting
+  // audio network adaptation on the source is the wrong layer of abstraction).
+  virtual const cricket::AudioOptions options() const;
 };
 
 // Interface of the audio processor used by the audio track to collect
diff --git a/api/peerconnectioninterface.h b/api/peerconnectioninterface.h
index 80c3091..54161b8 100644
--- a/api/peerconnectioninterface.h
+++ b/api/peerconnectioninterface.h
@@ -340,6 +340,22 @@
       media_config.video.experiment_cpu_load_estimator = enable;
     }
 
+    int audio_rtcp_report_interval_ms() const {
+      return media_config.audio.rtcp_report_interval_ms;
+    }
+    void set_audio_rtcp_report_interval_ms(int audio_rtcp_report_interval_ms) {
+      media_config.audio.rtcp_report_interval_ms =
+          audio_rtcp_report_interval_ms;
+    }
+
+    int video_rtcp_report_interval_ms() const {
+      return media_config.video.rtcp_report_interval_ms;
+    }
+    void set_video_rtcp_report_interval_ms(int video_rtcp_report_interval_ms) {
+      media_config.video.rtcp_report_interval_ms =
+          video_rtcp_report_interval_ms;
+    }
+
     static const int kUndefined = -1;
     // Default maximum number of packets in the audio jitter buffer.
     static const int kAudioJitterBufferMaxPackets = 50;
@@ -434,6 +450,9 @@
     // if it falls behind.
     bool audio_jitter_buffer_fast_accelerate = false;
 
+    // The minimum delay in milliseconds for the audio jitter buffer.
+    int audio_jitter_buffer_min_delay_ms = 0;
+
     // Timeout in milliseconds before an ICE candidate pair is considered to be
     // "not receiving", after which a lower priority candidate pair may be
     // selected.
@@ -597,6 +616,14 @@
     // settings set in PeerConnectionFactory (which is deprecated).
     absl::optional<CryptoOptions> crypto_options;
 
+    // Configure if we should include the SDP attribute extmap-allow-mixed in
+    // our offer. Although we currently do support this, it's not included in
+    // our offer by default due to a previous bug that caused the SDP parser to
+    // abort parsing if this attribute was present. This is fixed in Chrome 71.
+    // TODO(webrtc:9985): Change default to true once sufficient time has
+    // passed.
+    bool offer_extmap_allow_mixed = false;
+
     //
     // Don't forget to update operator== if adding something.
     //
@@ -1348,142 +1375,6 @@
   ~PeerConnectionFactoryInterface() override = default;
 };
 
-#if defined(USE_BUILTIN_SW_CODECS)
-// Create a new instance of PeerConnectionFactoryInterface.
-//
-// This method relies on the thread it's called on as the "signaling thread"
-// for the PeerConnectionFactory it creates.
-//
-// As such, if the current thread is not already running an rtc::Thread message
-// loop, an application using this method must eventually either call
-// rtc::Thread::Current()->Run(), or call
-// rtc::Thread::Current()->ProcessMessages() within the application's own
-// message loop.
-RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
-CreatePeerConnectionFactory(
-    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
-    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory);
-
-// Create a new instance of PeerConnectionFactoryInterface.
-//
-// |network_thread|, |worker_thread| and |signaling_thread| are
-// the only mandatory parameters.
-//
-// If non-null, a reference is added to |default_adm|, and ownership of
-// |video_encoder_factory| and |video_decoder_factory| is transferred to the
-// returned factory.
-// TODO(deadbeef): Use rtc::scoped_refptr<> and std::unique_ptr<> to make this
-// ownership transfer and ref counting more obvious.
-RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
-CreatePeerConnectionFactory(
-    rtc::Thread* network_thread,
-    rtc::Thread* worker_thread,
-    rtc::Thread* signaling_thread,
-    AudioDeviceModule* default_adm,
-    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
-    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
-    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
-    cricket::WebRtcVideoDecoderFactory* video_decoder_factory);
-
-// Create a new instance of PeerConnectionFactoryInterface with optional
-// external audio mixed and audio processing modules.
-//
-// If |audio_mixer| is null, an internal audio mixer will be created and used.
-// If |audio_processing| is null, an internal audio processing module will be
-// created and used.
-RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
-CreatePeerConnectionFactory(
-    rtc::Thread* network_thread,
-    rtc::Thread* worker_thread,
-    rtc::Thread* signaling_thread,
-    AudioDeviceModule* default_adm,
-    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
-    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
-    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
-    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
-    rtc::scoped_refptr<AudioMixer> audio_mixer,
-    rtc::scoped_refptr<AudioProcessing> audio_processing);
-
-// Create a new instance of PeerConnectionFactoryInterface with optional
-// external audio mixer, audio processing, and fec controller modules.
-//
-// If |audio_mixer| is null, an internal audio mixer will be created and used.
-// If |audio_processing| is null, an internal audio processing module will be
-// created and used.
-// If |fec_controller_factory| is null, an internal fec controller module will
-// be created and used.
-// If |network_controller_factory| is provided, it will be used if enabled via
-// field trial.
-RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
-CreatePeerConnectionFactory(
-    rtc::Thread* network_thread,
-    rtc::Thread* worker_thread,
-    rtc::Thread* signaling_thread,
-    AudioDeviceModule* default_adm,
-    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
-    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
-    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
-    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
-    rtc::scoped_refptr<AudioMixer> audio_mixer,
-    rtc::scoped_refptr<AudioProcessing> audio_processing,
-    std::unique_ptr<FecControllerFactoryInterface> fec_controller_factory,
-    std::unique_ptr<NetworkControllerFactoryInterface>
-        network_controller_factory = nullptr);
-#endif
-
-// Create a new instance of PeerConnectionFactoryInterface with optional video
-// codec factories. These video factories represents all video codecs, i.e. no
-// extra internal video codecs will be added.
-// When building WebRTC with rtc_use_builtin_sw_codecs = false, this is the
-// only available CreatePeerConnectionFactory overload.
-RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
-CreatePeerConnectionFactory(
-    rtc::Thread* network_thread,
-    rtc::Thread* worker_thread,
-    rtc::Thread* signaling_thread,
-    rtc::scoped_refptr<AudioDeviceModule> default_adm,
-    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
-    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
-    std::unique_ptr<VideoEncoderFactory> video_encoder_factory,
-    std::unique_ptr<VideoDecoderFactory> video_decoder_factory,
-    rtc::scoped_refptr<AudioMixer> audio_mixer,
-    rtc::scoped_refptr<AudioProcessing> audio_processing);
-
-#if defined(USE_BUILTIN_SW_CODECS)
-// Create a new instance of PeerConnectionFactoryInterface with external audio
-// mixer.
-//
-// If |audio_mixer| is null, an internal audio mixer will be created and used.
-RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
-CreatePeerConnectionFactoryWithAudioMixer(
-    rtc::Thread* network_thread,
-    rtc::Thread* worker_thread,
-    rtc::Thread* signaling_thread,
-    AudioDeviceModule* default_adm,
-    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
-    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
-    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
-    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
-    rtc::scoped_refptr<AudioMixer> audio_mixer);
-
-// Create a new instance of PeerConnectionFactoryInterface.
-// Same thread is used as worker and network thread.
-RTC_EXPORT inline rtc::scoped_refptr<PeerConnectionFactoryInterface>
-CreatePeerConnectionFactory(
-    rtc::Thread* worker_and_network_thread,
-    rtc::Thread* signaling_thread,
-    AudioDeviceModule* default_adm,
-    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
-    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
-    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
-    cricket::WebRtcVideoDecoderFactory* video_decoder_factory) {
-  return CreatePeerConnectionFactory(
-      worker_and_network_thread, worker_and_network_thread, signaling_thread,
-      default_adm, audio_encoder_factory, audio_decoder_factory,
-      video_encoder_factory, video_decoder_factory);
-}
-#endif
-
 // This is a lower-level version of the CreatePeerConnectionFactory functions
 // above. It's implemented in the "peerconnection" build target, whereas the
 // above methods are only implemented in the broader "libjingle_peerconnection"
diff --git a/api/proxy.cc b/api/proxy.cc
index 01e6be5..e668285 100644
--- a/api/proxy.cc
+++ b/api/proxy.cc
@@ -14,7 +14,7 @@
 namespace internal {
 
 SynchronousMethodCall::SynchronousMethodCall(rtc::MessageHandler* proxy)
-    : e_(), proxy_(proxy) {}
+    : proxy_(proxy) {}
 
 SynchronousMethodCall::~SynchronousMethodCall() = default;
 
@@ -23,15 +23,14 @@
   if (t->IsCurrent()) {
     proxy_->OnMessage(nullptr);
   } else {
-    e_ = absl::make_unique<rtc::Event>();
     t->Post(posted_from, this, 0);
-    e_->Wait(rtc::Event::kForever);
+    e_.Wait(rtc::Event::kForever);
   }
 }
 
 void SynchronousMethodCall::OnMessage(rtc::Message*) {
   proxy_->OnMessage(nullptr);
-  e_->Set();
+  e_.Set();
 }
 
 }  // namespace internal
diff --git a/api/proxy.h b/api/proxy.h
index c8962ef..9916051 100644
--- a/api/proxy.h
+++ b/api/proxy.h
@@ -143,7 +143,7 @@
  private:
   void OnMessage(rtc::Message*) override;
 
-  std::unique_ptr<rtc::Event> e_;
+  rtc::Event e_;
   rtc::MessageHandler* proxy_;
 };
 
diff --git a/api/rtp_headers.h b/api/rtp_headers.h
index eff6223..c766899 100644
--- a/api/rtp_headers.h
+++ b/api/rtp_headers.h
@@ -15,7 +15,9 @@
 #include <stdint.h>
 #include <string.h>
 
+#include "absl/types/optional.h"
 #include "api/array_view.h"
+#include "api/video/color_space.h"
 #include "api/video/video_content_type.h"
 #include "api/video/video_frame_marking.h"
 #include "api/video/video_rotation.h"
@@ -126,6 +128,8 @@
   // For identifying the media section used to interpret this RTP packet. See
   // https://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-38
   Mid mid;
+
+  absl::optional<ColorSpace> color_space;
 };
 
 struct RTPHeader {
diff --git a/api/scoped_refptr.h b/api/scoped_refptr.h
new file mode 100644
index 0000000..0993e03
--- /dev/null
+++ b/api/scoped_refptr.h
@@ -0,0 +1,162 @@
+/*
+ *  Copyright 2011 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Originally these classes are from Chromium.
+// http://src.chromium.org/viewvc/chrome/trunk/src/base/memory/ref_counted.h?view=markup
+
+//
+// A smart pointer class for reference counted objects.  Use this class instead
+// of calling AddRef and Release manually on a reference counted object to
+// avoid common memory leaks caused by forgetting to Release an object
+// reference.  Sample usage:
+//
+//   class MyFoo : public RefCounted<MyFoo> {
+//    ...
+//   };
+//
+//   void some_function() {
+//     scoped_refptr<MyFoo> foo = new MyFoo();
+//     foo->Method(param);
+//     // |foo| is released when this function returns
+//   }
+//
+//   void some_other_function() {
+//     scoped_refptr<MyFoo> foo = new MyFoo();
+//     ...
+//     foo = nullptr;  // explicitly releases |foo|
+//     ...
+//     if (foo)
+//       foo->Method(param);
+//   }
+//
+// The above examples show how scoped_refptr<T> acts like a pointer to T.
+// Given two scoped_refptr<T> classes, it is also possible to exchange
+// references between the two objects, like so:
+//
+//   {
+//     scoped_refptr<MyFoo> a = new MyFoo();
+//     scoped_refptr<MyFoo> b;
+//
+//     b.swap(a);
+//     // now, |b| references the MyFoo object, and |a| references null.
+//   }
+//
+// To make both |a| and |b| in the above example reference the same MyFoo
+// object, simply use the assignment operator:
+//
+//   {
+//     scoped_refptr<MyFoo> a = new MyFoo();
+//     scoped_refptr<MyFoo> b;
+//
+//     b = a;
+//     // now, |a| and |b| each own a reference to the same MyFoo object.
+//   }
+//
+
+#ifndef API_SCOPED_REFPTR_H_
+#define API_SCOPED_REFPTR_H_
+
+#include <memory>
+#include <utility>
+
+namespace rtc {
+
+template <class T>
+class scoped_refptr {
+ public:
+  scoped_refptr() : ptr_(nullptr) {}
+
+  scoped_refptr(T* p) : ptr_(p) {  // NOLINT(runtime/explicit)
+    if (ptr_)
+      ptr_->AddRef();
+  }
+
+  scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) {
+    if (ptr_)
+      ptr_->AddRef();
+  }
+
+  template <typename U>
+  scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
+    if (ptr_)
+      ptr_->AddRef();
+  }
+
+  // Move constructors.
+  scoped_refptr(scoped_refptr<T>&& r) : ptr_(r.release()) {}
+
+  template <typename U>
+  scoped_refptr(scoped_refptr<U>&& r) : ptr_(r.release()) {}
+
+  ~scoped_refptr() {
+    if (ptr_)
+      ptr_->Release();
+  }
+
+  T* get() const { return ptr_; }
+  operator T*() const { return ptr_; }
+  T* operator->() const { return ptr_; }
+
+  // Returns the (possibly null) raw pointer, and makes the scoped_refptr hold a
+  // null pointer, all without touching the reference count of the underlying
+  // pointed-to object. The object is still reference counted, and the caller of
+  // release() is now the proud owner of one reference, so it is responsible for
+  // calling Release() once on the object when no longer using it.
+  T* release() {
+    T* retVal = ptr_;
+    ptr_ = nullptr;
+    return retVal;
+  }
+
+  scoped_refptr<T>& operator=(T* p) {
+    // AddRef first so that self assignment should work
+    if (p)
+      p->AddRef();
+    if (ptr_)
+      ptr_->Release();
+    ptr_ = p;
+    return *this;
+  }
+
+  scoped_refptr<T>& operator=(const scoped_refptr<T>& r) {
+    return *this = r.ptr_;
+  }
+
+  template <typename U>
+  scoped_refptr<T>& operator=(const scoped_refptr<U>& r) {
+    return *this = r.get();
+  }
+
+  scoped_refptr<T>& operator=(scoped_refptr<T>&& r) {
+    scoped_refptr<T>(std::move(r)).swap(*this);
+    return *this;
+  }
+
+  template <typename U>
+  scoped_refptr<T>& operator=(scoped_refptr<U>&& r) {
+    scoped_refptr<T>(std::move(r)).swap(*this);
+    return *this;
+  }
+
+  void swap(T** pp) {
+    T* p = ptr_;
+    ptr_ = *pp;
+    *pp = p;
+  }
+
+  void swap(scoped_refptr<T>& r) { swap(&r.ptr_); }
+
+ protected:
+  T* ptr_;
+};
+
+}  // namespace rtc
+
+#endif  // API_SCOPED_REFPTR_H_
diff --git a/api/units/data_rate.cc b/api/units/data_rate.cc
index 9170627..d72d958 100644
--- a/api/units/data_rate.cc
+++ b/api/units/data_rate.cc
@@ -14,7 +14,7 @@
 
 namespace webrtc {
 
-std::string ToString(const DataRate& value) {
+std::string ToString(DataRate value) {
   char buf[64];
   rtc::SimpleStringBuilder sb(buf);
   if (value.IsInfinite()) {
diff --git a/api/units/data_rate.h b/api/units/data_rate.h
index 28efcd3..7119284 100644
--- a/api/units/data_rate.h
+++ b/api/units/data_rate.h
@@ -15,9 +15,6 @@
 #include <ostream>  // no-presubmit-check TODO(webrtc:8982)
 #endif              // UNIT_TEST
 
-#include <stdint.h>
-#include <algorithm>
-#include <cmath>
 #include <limits>
 #include <string>
 #include <type_traits>
@@ -25,12 +22,10 @@
 #include "api/units/data_size.h"
 #include "api/units/time_delta.h"
 #include "rtc_base/checks.h"
-#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/units/unit_base.h"
 
 namespace webrtc {
 namespace data_rate_impl {
-constexpr int64_t kPlusInfinityVal = std::numeric_limits<int64_t>::max();
-
 inline int64_t Microbits(const DataSize& size) {
   constexpr int64_t kMaxBeforeConversion =
       std::numeric_limits<int64_t>::max() / 8000000;
@@ -43,184 +38,64 @@
 // DataRate is a class that represents a given data rate. This can be used to
 // represent bandwidth, encoding bitrate, etc. The internal storage is bits per
 // second (bps).
-class DataRate {
+class DataRate final : public rtc_units_impl::RelativeUnit<DataRate> {
  public:
   DataRate() = delete;
-  static constexpr DataRate Zero() { return DataRate(0); }
-  static constexpr DataRate Infinity() {
-    return DataRate(data_rate_impl::kPlusInfinityVal);
-  }
+  static constexpr DataRate Infinity() { return PlusInfinity(); }
   template <int64_t bps>
   static constexpr DataRate BitsPerSec() {
-    static_assert(bps >= 0, "");
-    static_assert(bps < data_rate_impl::kPlusInfinityVal, "");
-    return DataRate(bps);
+    return FromStaticValue<bps>();
   }
   template <int64_t kbps>
   static constexpr DataRate KilobitsPerSec() {
-    static_assert(kbps >= 0, "");
-    static_assert(kbps < data_rate_impl::kPlusInfinityVal / 1000, "");
-    return DataRate(kbps * 1000);
-  }
-
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
-  static DataRate bps(T bits_per_second) {
-    RTC_DCHECK_GE(bits_per_second, 0);
-    RTC_DCHECK_LT(bits_per_second, data_rate_impl::kPlusInfinityVal);
-    return DataRate(rtc::dchecked_cast<int64_t>(bits_per_second));
-  }
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
-  static DataRate kbps(T kilobits_per_sec) {
-    RTC_DCHECK_GE(kilobits_per_sec, 0);
-    RTC_DCHECK_LT(kilobits_per_sec, data_rate_impl::kPlusInfinityVal / 1000);
-    return DataRate::bps(rtc::dchecked_cast<int64_t>(kilobits_per_sec) * 1000);
-  }
-
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static DataRate bps(T bits_per_second) {
-    if (bits_per_second == std::numeric_limits<T>::infinity()) {
-      return Infinity();
-    } else {
-      RTC_DCHECK(!std::isnan(bits_per_second));
-      RTC_DCHECK_GE(bits_per_second, 0);
-      RTC_DCHECK_LT(bits_per_second, data_rate_impl::kPlusInfinityVal);
-      return DataRate(rtc::dchecked_cast<int64_t>(bits_per_second));
-    }
-  }
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static DataRate kbps(T kilobits_per_sec) {
-    return DataRate::bps(kilobits_per_sec * 1e3);
-  }
-
-  template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type bps() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(bits_per_sec_);
-  }
-  template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type kbps() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(UnsafeKilobitsPerSec());
-  }
-
-  template <typename T>
-  typename std::enable_if<std::is_floating_point<T>::value,
-                          T>::type constexpr bps() const {
-    return IsInfinite() ? std::numeric_limits<T>::infinity() : bits_per_sec_;
+    return FromStaticFraction<kbps, 1000>();
   }
   template <typename T>
-  typename std::enable_if<std::is_floating_point<T>::value,
-                          T>::type constexpr kbps() const {
-    return bps<T>() * 1e-3;
+  static constexpr DataRate bps(T bits_per_second) {
+    return FromValue(bits_per_second);
   }
-
+  template <typename T>
+  static constexpr DataRate kbps(T kilobits_per_sec) {
+    return FromFraction<1000>(kilobits_per_sec);
+  }
+  template <typename T = int64_t>
+  constexpr T bps() const {
+    return ToValue<T>();
+  }
+  template <typename T = int64_t>
+  T kbps() const {
+    return ToFraction<1000, T>();
+  }
   constexpr int64_t bps_or(int64_t fallback_value) const {
-    return IsFinite() ? bits_per_sec_ : fallback_value;
+    return ToValueOr(fallback_value);
   }
   constexpr int64_t kbps_or(int64_t fallback_value) const {
-    return IsFinite() ? UnsafeKilobitsPerSec() : fallback_value;
-  }
-
-  constexpr bool IsZero() const { return bits_per_sec_ == 0; }
-  constexpr bool IsInfinite() const {
-    return bits_per_sec_ == data_rate_impl::kPlusInfinityVal;
-  }
-  constexpr bool IsFinite() const { return !IsInfinite(); }
-  DataRate Clamped(DataRate min_rate, DataRate max_rate) const {
-    return std::max(min_rate, std::min(*this, max_rate));
-  }
-  void Clamp(DataRate min_rate, DataRate max_rate) {
-    *this = Clamped(min_rate, max_rate);
-  }
-  DataRate operator-(const DataRate& other) const {
-    return DataRate::bps(bps() - other.bps());
-  }
-  DataRate operator+(const DataRate& other) const {
-    return DataRate::bps(bps() + other.bps());
-  }
-  DataRate& operator-=(const DataRate& other) {
-    *this = *this - other;
-    return *this;
-  }
-  DataRate& operator+=(const DataRate& other) {
-    *this = *this + other;
-    return *this;
-  }
-  constexpr double operator/(const DataRate& other) const {
-    return bps<double>() / other.bps<double>();
-  }
-  constexpr bool operator==(const DataRate& other) const {
-    return bits_per_sec_ == other.bits_per_sec_;
-  }
-  constexpr bool operator!=(const DataRate& other) const {
-    return bits_per_sec_ != other.bits_per_sec_;
-  }
-  constexpr bool operator<=(const DataRate& other) const {
-    return bits_per_sec_ <= other.bits_per_sec_;
-  }
-  constexpr bool operator>=(const DataRate& other) const {
-    return bits_per_sec_ >= other.bits_per_sec_;
-  }
-  constexpr bool operator>(const DataRate& other) const {
-    return bits_per_sec_ > other.bits_per_sec_;
-  }
-  constexpr bool operator<(const DataRate& other) const {
-    return bits_per_sec_ < other.bits_per_sec_;
+    return ToFractionOr<1000>(fallback_value);
   }
 
  private:
   // Bits per second used internally to simplify debugging by making the value
   // more recognizable.
-  explicit constexpr DataRate(int64_t bits_per_second)
-      : bits_per_sec_(bits_per_second) {}
-  constexpr int64_t UnsafeKilobitsPerSec() const {
-    return (bits_per_sec_ + 500) / 1000;
-  }
-  int64_t bits_per_sec_;
+  friend class rtc_units_impl::UnitBase<DataRate>;
+  using RelativeUnit::RelativeUnit;
+  static constexpr bool one_sided = true;
 };
 
-inline DataRate operator*(const DataRate& rate, const double& scalar) {
-  return DataRate::bps(std::round(rate.bps() * scalar));
-}
-inline DataRate operator*(const double& scalar, const DataRate& rate) {
-  return rate * scalar;
-}
-inline DataRate operator*(const DataRate& rate, const int64_t& scalar) {
-  return DataRate::bps(rate.bps() * scalar);
-}
-inline DataRate operator*(const int64_t& scalar, const DataRate& rate) {
-  return rate * scalar;
-}
-inline DataRate operator*(const DataRate& rate, const int32_t& scalar) {
-  return DataRate::bps(rate.bps() * scalar);
-}
-inline DataRate operator*(const int32_t& scalar, const DataRate& rate) {
-  return rate * scalar;
-}
-
-inline DataRate operator/(const DataSize& size, const TimeDelta& duration) {
+inline DataRate operator/(const DataSize size, const TimeDelta duration) {
   return DataRate::bps(data_rate_impl::Microbits(size) / duration.us());
 }
-inline TimeDelta operator/(const DataSize& size, const DataRate& rate) {
+inline TimeDelta operator/(const DataSize size, const DataRate rate) {
   return TimeDelta::us(data_rate_impl::Microbits(size) / rate.bps());
 }
-inline DataSize operator*(const DataRate& rate, const TimeDelta& duration) {
+inline DataSize operator*(const DataRate rate, const TimeDelta duration) {
   int64_t microbits = rate.bps() * duration.us();
   return DataSize::bytes((microbits + 4000000) / 8000000);
 }
-inline DataSize operator*(const TimeDelta& duration, const DataRate& rate) {
+inline DataSize operator*(const TimeDelta duration, const DataRate rate) {
   return rate * duration;
 }
 
-std::string ToString(const DataRate& value);
+std::string ToString(DataRate value);
 
 #ifdef UNIT_TEST
 inline std::ostream& operator<<(  // no-presubmit-check TODO(webrtc:8982)
diff --git a/api/units/data_rate_unittest.cc b/api/units/data_rate_unittest.cc
index 8e5b660..996298c 100644
--- a/api/units/data_rate_unittest.cc
+++ b/api/units/data_rate_unittest.cc
@@ -130,6 +130,9 @@
 
   EXPECT_EQ(rate_a / rate_b, static_cast<double>(kValueA) / kValueB);
 
+  EXPECT_EQ((rate_a / 10).bps(), kValueA / 10);
+  EXPECT_NEAR((rate_a / 0.5).bps(), kValueA * 2, 1);
+
   DataRate mutable_rate = DataRate::bps(kValueA);
   mutable_rate += rate_b;
   EXPECT_EQ(mutable_rate.bps(), kValueA + kValueB);
diff --git a/api/units/data_size.cc b/api/units/data_size.cc
index 4440f89..8a87786 100644
--- a/api/units/data_size.cc
+++ b/api/units/data_size.cc
@@ -14,7 +14,7 @@
 
 namespace webrtc {
 
-std::string ToString(const DataSize& value) {
+std::string ToString(DataSize value) {
   char buf[64];
   rtc::SimpleStringBuilder sb(buf);
   if (value.IsInfinite()) {
diff --git a/api/units/data_size.h b/api/units/data_size.h
index 8958b24..b4cbb65 100644
--- a/api/units/data_size.h
+++ b/api/units/data_size.h
@@ -15,143 +15,44 @@
 #include <ostream>  // no-presubmit-check TODO(webrtc:8982)
 #endif              // UNIT_TEST
 
-#include <stdint.h>
-#include <cmath>
-#include <limits>
 #include <string>
 #include <type_traits>
 
-#include "rtc_base/checks.h"
-#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/units/unit_base.h"
 
 namespace webrtc {
-namespace data_size_impl {
-constexpr int64_t kPlusInfinityVal = std::numeric_limits<int64_t>::max();
-}  // namespace data_size_impl
-
 // DataSize is a class represeting a count of bytes.
-class DataSize {
+class DataSize final : public rtc_units_impl::RelativeUnit<DataSize> {
  public:
   DataSize() = delete;
-  static constexpr DataSize Zero() { return DataSize(0); }
-  static constexpr DataSize Infinity() {
-    return DataSize(data_size_impl::kPlusInfinityVal);
-  }
+  static constexpr DataSize Infinity() { return PlusInfinity(); }
   template <int64_t bytes>
   static constexpr DataSize Bytes() {
-    static_assert(bytes >= 0, "");
-    static_assert(bytes < data_size_impl::kPlusInfinityVal, "");
-    return DataSize(bytes);
+    return FromStaticValue<bytes>();
   }
 
   template <
       typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+      typename std::enable_if<std::is_arithmetic<T>::value>::type* = nullptr>
   static DataSize bytes(T bytes) {
-    RTC_DCHECK_GE(bytes, 0);
-    RTC_DCHECK_LT(bytes, data_size_impl::kPlusInfinityVal);
-    return DataSize(rtc::dchecked_cast<int64_t>(bytes));
+    return FromValue(bytes);
   }
-
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static DataSize bytes(T bytes) {
-    if (bytes == std::numeric_limits<T>::infinity()) {
-      return Infinity();
-    } else {
-      RTC_DCHECK(!std::isnan(bytes));
-      RTC_DCHECK_GE(bytes, 0);
-      RTC_DCHECK_LT(bytes, data_size_impl::kPlusInfinityVal);
-      return DataSize(rtc::dchecked_cast<int64_t>(bytes));
-    }
-  }
-
   template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type bytes() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(bytes_);
-  }
-
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  bytes() const {
-    return IsInfinite() ? std::numeric_limits<T>::infinity() : bytes_;
+  typename std::enable_if<std::is_arithmetic<T>::value, T>::type bytes() const {
+    return ToValue<T>();
   }
 
   constexpr int64_t bytes_or(int64_t fallback_value) const {
-    return IsFinite() ? bytes_ : fallback_value;
-  }
-
-  constexpr bool IsZero() const { return bytes_ == 0; }
-  constexpr bool IsInfinite() const {
-    return bytes_ == data_size_impl::kPlusInfinityVal;
-  }
-  constexpr bool IsFinite() const { return !IsInfinite(); }
-  DataSize operator-(const DataSize& other) const {
-    return DataSize::bytes(bytes() - other.bytes());
-  }
-  DataSize operator+(const DataSize& other) const {
-    return DataSize::bytes(bytes() + other.bytes());
-  }
-  DataSize& operator-=(const DataSize& other) {
-    *this = *this - other;
-    return *this;
-  }
-  DataSize& operator+=(const DataSize& other) {
-    *this = *this + other;
-    return *this;
-  }
-  constexpr double operator/(const DataSize& other) const {
-    return bytes<double>() / other.bytes<double>();
-  }
-  constexpr bool operator==(const DataSize& other) const {
-    return bytes_ == other.bytes_;
-  }
-  constexpr bool operator!=(const DataSize& other) const {
-    return bytes_ != other.bytes_;
-  }
-  constexpr bool operator<=(const DataSize& other) const {
-    return bytes_ <= other.bytes_;
-  }
-  constexpr bool operator>=(const DataSize& other) const {
-    return bytes_ >= other.bytes_;
-  }
-  constexpr bool operator>(const DataSize& other) const {
-    return bytes_ > other.bytes_;
-  }
-  constexpr bool operator<(const DataSize& other) const {
-    return bytes_ < other.bytes_;
+    return ToValueOr(fallback_value);
   }
 
  private:
-  explicit constexpr DataSize(int64_t bytes) : bytes_(bytes) {}
-  int64_t bytes_;
+  friend class rtc_units_impl::UnitBase<DataSize>;
+  using RelativeUnit::RelativeUnit;
+  static constexpr bool one_sided = true;
 };
 
-inline DataSize operator*(const DataSize& size, const double& scalar) {
-  return DataSize::bytes(std::round(size.bytes() * scalar));
-}
-inline DataSize operator*(const double& scalar, const DataSize& size) {
-  return size * scalar;
-}
-inline DataSize operator*(const DataSize& size, const int64_t& scalar) {
-  return DataSize::bytes(size.bytes() * scalar);
-}
-inline DataSize operator*(const int64_t& scalar, const DataSize& size) {
-  return size * scalar;
-}
-inline DataSize operator*(const DataSize& size, const int32_t& scalar) {
-  return DataSize::bytes(size.bytes() * scalar);
-}
-inline DataSize operator*(const int32_t& scalar, const DataSize& size) {
-  return size * scalar;
-}
-inline DataSize operator/(const DataSize& size, const int64_t& scalar) {
-  return DataSize::bytes(size.bytes() / scalar);
-}
-
-std::string ToString(const DataSize& value);
+std::string ToString(DataSize value);
 
 #ifdef UNIT_TEST
 inline std::ostream& operator<<(  // no-presubmit-check TODO(webrtc:8982)
diff --git a/api/units/time_delta.cc b/api/units/time_delta.cc
index d38387a..f90451b 100644
--- a/api/units/time_delta.cc
+++ b/api/units/time_delta.cc
@@ -14,7 +14,7 @@
 
 namespace webrtc {
 
-std::string ToString(const TimeDelta& value) {
+std::string ToString(TimeDelta value) {
   char buf[64];
   rtc::SimpleStringBuilder sb(buf);
   if (value.IsPlusInfinity()) {
diff --git a/api/units/time_delta.h b/api/units/time_delta.h
index 74b5385..6458369 100644
--- a/api/units/time_delta.h
+++ b/api/units/time_delta.h
@@ -15,22 +15,13 @@
 #include <ostream>  // no-presubmit-check TODO(webrtc:8982)
 #endif              // UNIT_TEST
 
-#include <stdint.h>
-#include <cmath>
 #include <cstdlib>
-#include <limits>
 #include <string>
 #include <type_traits>
 
-#include "rtc_base/checks.h"
-#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/units/unit_base.h"
 
 namespace webrtc {
-namespace timedelta_impl {
-constexpr int64_t kPlusInfinityVal = std::numeric_limits<int64_t>::max();
-constexpr int64_t kMinusInfinityVal = std::numeric_limits<int64_t>::min();
-}  // namespace timedelta_impl
-
 // TimeDelta represents the difference between two timestamps. Commonly this can
 // be a duration. However since two Timestamps are not guaranteed to have the
 // same epoch (they might come from different computers, making exact
@@ -38,245 +29,69 @@
 // undefined. To simplify usage, it can be constructed and converted to
 // different units, specifically seconds (s), milliseconds (ms) and
 // microseconds (us).
-class TimeDelta {
+class TimeDelta final : public rtc_units_impl::RelativeUnit<TimeDelta> {
  public:
   TimeDelta() = delete;
-  static constexpr TimeDelta Zero() { return TimeDelta(0); }
-  static constexpr TimeDelta PlusInfinity() {
-    return TimeDelta(timedelta_impl::kPlusInfinityVal);
-  }
-  static constexpr TimeDelta MinusInfinity() {
-    return TimeDelta(timedelta_impl::kMinusInfinityVal);
-  }
   template <int64_t seconds>
   static constexpr TimeDelta Seconds() {
-    static_assert(seconds > timedelta_impl::kMinusInfinityVal / 1000000, "");
-    static_assert(seconds < timedelta_impl::kPlusInfinityVal / 1000000, "");
-    return TimeDelta(seconds * 1000000);
+    return FromStaticFraction<seconds, 1000000>();
   }
   template <int64_t ms>
   static constexpr TimeDelta Millis() {
-    static_assert(ms > timedelta_impl::kMinusInfinityVal / 1000, "");
-    static_assert(ms < timedelta_impl::kPlusInfinityVal / 1000, "");
-    return TimeDelta(ms * 1000);
+    return FromStaticFraction<ms, 1000>();
   }
   template <int64_t us>
   static constexpr TimeDelta Micros() {
-    static_assert(us > timedelta_impl::kMinusInfinityVal, "");
-    static_assert(us < timedelta_impl::kPlusInfinityVal, "");
-    return TimeDelta(us);
+    return FromStaticValue<us>();
   }
-
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  template <typename T>
   static TimeDelta seconds(T seconds) {
-    RTC_DCHECK_GT(seconds, timedelta_impl::kMinusInfinityVal / 1000000);
-    RTC_DCHECK_LT(seconds, timedelta_impl::kPlusInfinityVal / 1000000);
-    return TimeDelta(rtc::dchecked_cast<int64_t>(seconds) * 1000000);
+    return FromFraction<1000000>(seconds);
   }
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  template <typename T>
   static TimeDelta ms(T milliseconds) {
-    RTC_DCHECK_GT(milliseconds, timedelta_impl::kMinusInfinityVal / 1000);
-    RTC_DCHECK_LT(milliseconds, timedelta_impl::kPlusInfinityVal / 1000);
-    return TimeDelta(rtc::dchecked_cast<int64_t>(milliseconds) * 1000);
+    return FromFraction<1000>(milliseconds);
   }
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  template <typename T>
   static TimeDelta us(T microseconds) {
-    RTC_DCHECK_GT(microseconds, timedelta_impl::kMinusInfinityVal);
-    RTC_DCHECK_LT(microseconds, timedelta_impl::kPlusInfinityVal);
-    return TimeDelta(rtc::dchecked_cast<int64_t>(microseconds));
-  }
-
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static TimeDelta seconds(T seconds) {
-    return TimeDelta::us(seconds * 1e6);
-  }
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static TimeDelta ms(T milliseconds) {
-    return TimeDelta::us(milliseconds * 1e3);
-  }
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static TimeDelta us(T microseconds) {
-    if (microseconds == std::numeric_limits<T>::infinity()) {
-      return PlusInfinity();
-    } else if (microseconds == -std::numeric_limits<T>::infinity()) {
-      return MinusInfinity();
-    } else {
-      RTC_DCHECK(!std::isnan(microseconds));
-      RTC_DCHECK_GT(microseconds, timedelta_impl::kMinusInfinityVal);
-      RTC_DCHECK_LT(microseconds, timedelta_impl::kPlusInfinityVal);
-      return TimeDelta(rtc::dchecked_cast<int64_t>(microseconds));
-    }
-  }
-
-  template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type seconds() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(UnsafeSeconds());
+    return FromValue(microseconds);
   }
   template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type ms() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(UnsafeMillis());
+  T seconds() const {
+    return ToFraction<1000000, T>();
   }
   template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type us() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(microseconds_);
+  T ms() const {
+    return ToFraction<1000, T>();
   }
   template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type ns() const {
-    RTC_DCHECK_GE(us(), std::numeric_limits<T>::min() / 1000);
-    RTC_DCHECK_LE(us(), std::numeric_limits<T>::max() / 1000);
-    return rtc::dchecked_cast<T>(us() * 1000);
+  T us() const {
+    return ToValue<T>();
   }
-
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  seconds() const {
-    return us<T>() * 1e-6;
-  }
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  ms() const {
-    return us<T>() * 1e-3;
-  }
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  us() const {
-    return IsPlusInfinity()
-               ? std::numeric_limits<T>::infinity()
-               : IsMinusInfinity() ? -std::numeric_limits<T>::infinity()
-                                   : microseconds_;
-  }
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  ns() const {
-    return us<T>() * 1e3;
+  template <typename T = int64_t>
+  T ns() const {
+    return ToMultiple<1000, T>();
   }
 
   constexpr int64_t seconds_or(int64_t fallback_value) const {
-    return IsFinite() ? UnsafeSeconds() : fallback_value;
+    return ToFractionOr<1000000>(fallback_value);
   }
   constexpr int64_t ms_or(int64_t fallback_value) const {
-    return IsFinite() ? UnsafeMillis() : fallback_value;
+    return ToFractionOr<1000>(fallback_value);
   }
   constexpr int64_t us_or(int64_t fallback_value) const {
-    return IsFinite() ? microseconds_ : fallback_value;
+    return ToValueOr(fallback_value);
   }
 
   TimeDelta Abs() const { return TimeDelta::us(std::abs(us())); }
-  constexpr bool IsZero() const { return microseconds_ == 0; }
-  constexpr bool IsFinite() const { return !IsInfinite(); }
-  constexpr bool IsInfinite() const {
-    return microseconds_ == timedelta_impl::kPlusInfinityVal ||
-           microseconds_ == timedelta_impl::kMinusInfinityVal;
-  }
-  constexpr bool IsPlusInfinity() const {
-    return microseconds_ == timedelta_impl::kPlusInfinityVal;
-  }
-  constexpr bool IsMinusInfinity() const {
-    return microseconds_ == timedelta_impl::kMinusInfinityVal;
-  }
-  TimeDelta operator+(const TimeDelta& other) const {
-    if (IsPlusInfinity() || other.IsPlusInfinity()) {
-      RTC_DCHECK(!IsMinusInfinity());
-      RTC_DCHECK(!other.IsMinusInfinity());
-      return PlusInfinity();
-    } else if (IsMinusInfinity() || other.IsMinusInfinity()) {
-      RTC_DCHECK(!IsPlusInfinity());
-      RTC_DCHECK(!other.IsPlusInfinity());
-      return MinusInfinity();
-    }
-    return TimeDelta::us(us() + other.us());
-  }
-  TimeDelta operator-(const TimeDelta& other) const {
-    if (IsPlusInfinity() || other.IsMinusInfinity()) {
-      RTC_DCHECK(!IsMinusInfinity());
-      RTC_DCHECK(!other.IsPlusInfinity());
-      return PlusInfinity();
-    } else if (IsMinusInfinity() || other.IsPlusInfinity()) {
-      RTC_DCHECK(!IsPlusInfinity());
-      RTC_DCHECK(!other.IsMinusInfinity());
-      return MinusInfinity();
-    }
-    return TimeDelta::us(us() - other.us());
-  }
-  TimeDelta& operator-=(const TimeDelta& other) {
-    *this = *this - other;
-    return *this;
-  }
-  TimeDelta& operator+=(const TimeDelta& other) {
-    *this = *this + other;
-    return *this;
-  }
-  constexpr double operator/(const TimeDelta& other) const {
-    return us<double>() / other.us<double>();
-  }
-  constexpr bool operator==(const TimeDelta& other) const {
-    return microseconds_ == other.microseconds_;
-  }
-  constexpr bool operator!=(const TimeDelta& other) const {
-    return microseconds_ != other.microseconds_;
-  }
-  constexpr bool operator<=(const TimeDelta& other) const {
-    return microseconds_ <= other.microseconds_;
-  }
-  constexpr bool operator>=(const TimeDelta& other) const {
-    return microseconds_ >= other.microseconds_;
-  }
-  constexpr bool operator>(const TimeDelta& other) const {
-    return microseconds_ > other.microseconds_;
-  }
-  constexpr bool operator<(const TimeDelta& other) const {
-    return microseconds_ < other.microseconds_;
-  }
 
  private:
-  explicit constexpr TimeDelta(int64_t us) : microseconds_(us) {}
-  constexpr int64_t UnsafeSeconds() const {
-    return (microseconds_ + (microseconds_ >= 0 ? 500000 : -500000)) / 1000000;
-  }
-  constexpr int64_t UnsafeMillis() const {
-    return (microseconds_ + (microseconds_ >= 0 ? 500 : -500)) / 1000;
-  }
-  int64_t microseconds_;
+  friend class rtc_units_impl::UnitBase<TimeDelta>;
+  using RelativeUnit::RelativeUnit;
+  static constexpr bool one_sided = false;
 };
 
-inline TimeDelta operator*(const TimeDelta& delta, const double& scalar) {
-  return TimeDelta::us(std::round(delta.us() * scalar));
-}
-inline TimeDelta operator*(const double& scalar, const TimeDelta& delta) {
-  return delta * scalar;
-}
-inline TimeDelta operator*(const TimeDelta& delta, const int64_t& scalar) {
-  return TimeDelta::us(delta.us() * scalar);
-}
-inline TimeDelta operator*(const int64_t& scalar, const TimeDelta& delta) {
-  return delta * scalar;
-}
-inline TimeDelta operator*(const TimeDelta& delta, const int32_t& scalar) {
-  return TimeDelta::us(delta.us() * scalar);
-}
-inline TimeDelta operator*(const int32_t& scalar, const TimeDelta& delta) {
-  return delta * scalar;
-}
-
-inline TimeDelta operator/(const TimeDelta& delta, const int64_t& scalar) {
-  return TimeDelta::us(delta.us() / scalar);
-}
-std::string ToString(const TimeDelta& value);
+std::string ToString(TimeDelta value);
 
 #ifdef UNIT_TEST
 inline std::ostream& operator<<(  // no-presubmit-check TODO(webrtc:8982)
diff --git a/api/units/time_delta_unittest.cc b/api/units/time_delta_unittest.cc
index bf8bbce..a46ba83 100644
--- a/api/units/time_delta_unittest.cc
+++ b/api/units/time_delta_unittest.cc
@@ -10,6 +10,8 @@
 
 #include "api/units/time_delta.h"
 
+#include <limits>
+
 #include "test/gtest.h"
 
 namespace webrtc {
@@ -106,6 +108,27 @@
   EXPECT_LT(TimeDelta::MinusInfinity(), TimeDelta::Zero());
 }
 
+TEST(TimeDeltaTest, Clamping) {
+  const TimeDelta upper = TimeDelta::ms(800);
+  const TimeDelta lower = TimeDelta::ms(100);
+  const TimeDelta under = TimeDelta::ms(100);
+  const TimeDelta inside = TimeDelta::ms(500);
+  const TimeDelta over = TimeDelta::ms(1000);
+  EXPECT_EQ(under.Clamped(lower, upper), lower);
+  EXPECT_EQ(inside.Clamped(lower, upper), inside);
+  EXPECT_EQ(over.Clamped(lower, upper), upper);
+
+  TimeDelta mutable_delta = lower;
+  mutable_delta.Clamp(lower, upper);
+  EXPECT_EQ(mutable_delta, lower);
+  mutable_delta = inside;
+  mutable_delta.Clamp(lower, upper);
+  EXPECT_EQ(mutable_delta, inside);
+  mutable_delta = over;
+  mutable_delta.Clamp(lower, upper);
+  EXPECT_EQ(mutable_delta, upper);
+}
+
 TEST(TimeDeltaTest, CanBeInititializedFromLargeInt) {
   const int kMaxInt = std::numeric_limits<int>::max();
   EXPECT_EQ(TimeDelta::seconds(kMaxInt).us(),
diff --git a/api/units/timestamp.cc b/api/units/timestamp.cc
index feb1447..d3417cf 100644
--- a/api/units/timestamp.cc
+++ b/api/units/timestamp.cc
@@ -13,7 +13,7 @@
 #include "rtc_base/strings/string_builder.h"
 
 namespace webrtc {
-std::string ToString(const Timestamp& value) {
+std::string ToString(Timestamp value) {
   char buf[64];
   rtc::SimpleStringBuilder sb(buf);
   if (value.IsInfinite()) {
diff --git a/api/units/timestamp.h b/api/units/timestamp.h
index 80f1839..a6e450f 100644
--- a/api/units/timestamp.h
+++ b/api/units/timestamp.h
@@ -15,191 +15,94 @@
 #include <ostream>  // no-presubmit-check TODO(webrtc:8982)
 #endif              // UNIT_TEST
 
-#include <math.h>
-#include <stdint.h>
-#include <limits>
 #include <string>
 #include <type_traits>
 
 #include "api/units/time_delta.h"
 #include "rtc_base/checks.h"
-#include "rtc_base/numerics/safe_conversions.h"
 
 namespace webrtc {
-namespace timestamp_impl {
-constexpr int64_t kPlusInfinityVal = std::numeric_limits<int64_t>::max();
-constexpr int64_t kMinusInfinityVal = std::numeric_limits<int64_t>::min();
-}  // namespace timestamp_impl
-
 // Timestamp represents the time that has passed since some unspecified epoch.
 // The epoch is assumed to be before any represented timestamps, this means that
 // negative values are not valid. The most notable feature is that the
 // difference of two Timestamps results in a TimeDelta.
-class Timestamp {
+class Timestamp final : public rtc_units_impl::UnitBase<Timestamp> {
  public:
   Timestamp() = delete;
-  static constexpr Timestamp PlusInfinity() {
-    return Timestamp(timestamp_impl::kPlusInfinityVal);
-  }
-  static constexpr Timestamp MinusInfinity() {
-    return Timestamp(timestamp_impl::kMinusInfinityVal);
-  }
+
   template <int64_t seconds>
   static constexpr Timestamp Seconds() {
-    static_assert(seconds >= 0, "");
-    static_assert(seconds < timestamp_impl::kPlusInfinityVal / 1000000, "");
-    return Timestamp(seconds * 1000000);
+    return FromStaticFraction<seconds, 1000000>();
   }
   template <int64_t ms>
   static constexpr Timestamp Millis() {
-    static_assert(ms >= 0, "");
-    static_assert(ms < timestamp_impl::kPlusInfinityVal / 1000, "");
-    return Timestamp(ms * 1000);
+    return FromStaticFraction<ms, 1000>();
   }
   template <int64_t us>
   static constexpr Timestamp Micros() {
-    static_assert(us >= 0, "");
-    static_assert(us < timestamp_impl::kPlusInfinityVal, "");
-    return Timestamp(us);
+    return FromStaticValue<us>();
   }
 
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  template <typename T>
   static Timestamp seconds(T seconds) {
-    RTC_DCHECK_GE(seconds, 0);
-    RTC_DCHECK_LT(seconds, timestamp_impl::kPlusInfinityVal / 1000000);
-    return Timestamp(rtc::dchecked_cast<int64_t>(seconds) * 1000000);
+    return FromFraction<1000000>(seconds);
   }
-
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  template <typename T>
   static Timestamp ms(T milliseconds) {
-    RTC_DCHECK_GE(milliseconds, 0);
-    RTC_DCHECK_LT(milliseconds, timestamp_impl::kPlusInfinityVal / 1000);
-    return Timestamp(rtc::dchecked_cast<int64_t>(milliseconds) * 1000);
+    return FromFraction<1000>(milliseconds);
   }
-
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  template <typename T>
   static Timestamp us(T microseconds) {
-    RTC_DCHECK_GE(microseconds, 0);
-    RTC_DCHECK_LT(microseconds, timestamp_impl::kPlusInfinityVal);
-    return Timestamp(rtc::dchecked_cast<int64_t>(microseconds));
-  }
-
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static Timestamp seconds(T seconds) {
-    return Timestamp::us(seconds * 1e6);
-  }
-
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static Timestamp ms(T milliseconds) {
-    return Timestamp::us(milliseconds * 1e3);
-  }
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static Timestamp us(T microseconds) {
-    if (microseconds == std::numeric_limits<double>::infinity()) {
-      return PlusInfinity();
-    } else if (microseconds == -std::numeric_limits<double>::infinity()) {
-      return MinusInfinity();
-    } else {
-      RTC_DCHECK(!std::isnan(microseconds));
-      RTC_DCHECK_GE(microseconds, 0);
-      RTC_DCHECK_LT(microseconds, timestamp_impl::kPlusInfinityVal);
-      return Timestamp(rtc::dchecked_cast<int64_t>(microseconds));
-    }
-  }
-
-  template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type seconds() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(UnsafeSeconds());
+    return FromValue(microseconds);
   }
   template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type ms() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(UnsafeMillis());
+  T seconds() const {
+    return ToFraction<1000000, T>();
   }
   template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type us() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(microseconds_);
+  T ms() const {
+    return ToFraction<1000, T>();
   }
-
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  seconds() const {
-    return us<T>() * 1e-6;
-  }
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  ms() const {
-    return us<T>() * 1e-3;
-  }
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  us() const {
-    return IsPlusInfinity()
-               ? std::numeric_limits<T>::infinity()
-               : IsMinusInfinity() ? -std::numeric_limits<T>::infinity()
-                                   : microseconds_;
+  template <typename T = int64_t>
+  T us() const {
+    return ToValue<T>();
   }
 
   constexpr int64_t seconds_or(int64_t fallback_value) const {
-    return IsFinite() ? UnsafeSeconds() : fallback_value;
+    return ToFractionOr<1000000>(fallback_value);
   }
   constexpr int64_t ms_or(int64_t fallback_value) const {
-    return IsFinite() ? UnsafeMillis() : fallback_value;
+    return ToFractionOr<1000>(fallback_value);
   }
   constexpr int64_t us_or(int64_t fallback_value) const {
-    return IsFinite() ? microseconds_ : fallback_value;
+    return ToValueOr(fallback_value);
   }
 
-  constexpr bool IsFinite() const { return !IsInfinite(); }
-  constexpr bool IsInfinite() const {
-    return microseconds_ == timedelta_impl::kPlusInfinityVal ||
-           microseconds_ == timedelta_impl::kMinusInfinityVal;
-  }
-  constexpr bool IsPlusInfinity() const {
-    return microseconds_ == timedelta_impl::kPlusInfinityVal;
-  }
-  constexpr bool IsMinusInfinity() const {
-    return microseconds_ == timedelta_impl::kMinusInfinityVal;
-  }
-  Timestamp operator+(const TimeDelta& other) const {
-    if (IsPlusInfinity() || other.IsPlusInfinity()) {
+  Timestamp operator+(const TimeDelta delta) const {
+    if (IsPlusInfinity() || delta.IsPlusInfinity()) {
       RTC_DCHECK(!IsMinusInfinity());
-      RTC_DCHECK(!other.IsMinusInfinity());
+      RTC_DCHECK(!delta.IsMinusInfinity());
       return PlusInfinity();
-    } else if (IsMinusInfinity() || other.IsMinusInfinity()) {
+    } else if (IsMinusInfinity() || delta.IsMinusInfinity()) {
       RTC_DCHECK(!IsPlusInfinity());
-      RTC_DCHECK(!other.IsPlusInfinity());
+      RTC_DCHECK(!delta.IsPlusInfinity());
       return MinusInfinity();
     }
-    return Timestamp::us(us() + other.us());
+    return Timestamp::us(us() + delta.us());
   }
-  Timestamp operator-(const TimeDelta& other) const {
-    if (IsPlusInfinity() || other.IsMinusInfinity()) {
+  Timestamp operator-(const TimeDelta delta) const {
+    if (IsPlusInfinity() || delta.IsMinusInfinity()) {
       RTC_DCHECK(!IsMinusInfinity());
-      RTC_DCHECK(!other.IsPlusInfinity());
+      RTC_DCHECK(!delta.IsPlusInfinity());
       return PlusInfinity();
-    } else if (IsMinusInfinity() || other.IsPlusInfinity()) {
+    } else if (IsMinusInfinity() || delta.IsPlusInfinity()) {
       RTC_DCHECK(!IsPlusInfinity());
-      RTC_DCHECK(!other.IsMinusInfinity());
+      RTC_DCHECK(!delta.IsMinusInfinity());
       return MinusInfinity();
     }
-    return Timestamp::us(us() - other.us());
+    return Timestamp::us(us() - delta.us());
   }
-  TimeDelta operator-(const Timestamp& other) const {
+  TimeDelta operator-(const Timestamp other) const {
     if (IsPlusInfinity() || other.IsMinusInfinity()) {
       RTC_DCHECK(!IsMinusInfinity());
       RTC_DCHECK(!other.IsPlusInfinity());
@@ -211,45 +114,22 @@
     }
     return TimeDelta::us(us() - other.us());
   }
-  Timestamp& operator-=(const TimeDelta& other) {
-    *this = *this - other;
+  Timestamp& operator-=(const TimeDelta delta) {
+    *this = *this - delta;
     return *this;
   }
-  Timestamp& operator+=(const TimeDelta& other) {
-    *this = *this + other;
+  Timestamp& operator+=(const TimeDelta delta) {
+    *this = *this + delta;
     return *this;
   }
-  constexpr bool operator==(const Timestamp& other) const {
-    return microseconds_ == other.microseconds_;
-  }
-  constexpr bool operator!=(const Timestamp& other) const {
-    return microseconds_ != other.microseconds_;
-  }
-  constexpr bool operator<=(const Timestamp& other) const {
-    return microseconds_ <= other.microseconds_;
-  }
-  constexpr bool operator>=(const Timestamp& other) const {
-    return microseconds_ >= other.microseconds_;
-  }
-  constexpr bool operator>(const Timestamp& other) const {
-    return microseconds_ > other.microseconds_;
-  }
-  constexpr bool operator<(const Timestamp& other) const {
-    return microseconds_ < other.microseconds_;
-  }
 
  private:
-  explicit constexpr Timestamp(int64_t us) : microseconds_(us) {}
-  constexpr int64_t UnsafeSeconds() const {
-    return (microseconds_ + 500000) / 1000000;
-  }
-  constexpr int64_t UnsafeMillis() const {
-    return (microseconds_ + 500) / 1000;
-  }
-  int64_t microseconds_;
+  friend class rtc_units_impl::UnitBase<Timestamp>;
+  using UnitBase::UnitBase;
+  static constexpr bool one_sided = true;
 };
 
-std::string ToString(const Timestamp& value);
+std::string ToString(Timestamp value);
 
 #ifdef UNIT_TEST
 inline std::ostream& operator<<(  // no-presubmit-check TODO(webrtc:8982)
diff --git a/api/video/color_space.cc b/api/video/color_space.cc
index a8be5cd..ad138ab 100644
--- a/api/video/color_space.cc
+++ b/api/video/color_space.cc
@@ -10,18 +10,72 @@
 
 #include "api/video/color_space.h"
 
+namespace {
+// Try to convert |enum_value| into the enum class T. |enum_bitmask| is created
+// by the function below. Returns true if conversion was successful, false
+// otherwise.
+template <typename T>
+bool SetFromUint8(uint8_t enum_value, uint64_t enum_bitmask, T* out) {
+  if ((enum_value < 64) && ((enum_bitmask >> enum_value) & 1)) {
+    *out = static_cast<T>(enum_value);
+    return true;
+  }
+  return false;
+}
+
+// This function serves as an assert for the constexpr function below. It's on
+// purpose not declared as constexpr so that it causes a build problem if enum
+// values of 64 or above are used. The bitmask and the code generating it would
+// have to be extended if the standard is updated to include enum values >= 64.
+int EnumMustBeLessThan64() {
+  return -1;
+}
+
+template <typename T, size_t N>
+constexpr int MakeMask(const int index, const int length, T (&values)[N]) {
+  return length > 1
+             ? (MakeMask(index, 1, values) +
+                MakeMask(index + 1, length - 1, values))
+             : (static_cast<uint8_t>(values[index]) < 64
+                    ? (uint64_t{1} << static_cast<uint8_t>(values[index]))
+                    : EnumMustBeLessThan64());
+}
+
+// Create a bitmask where each bit corresponds to one potential enum value.
+// |values| should be an array listing all possible enum values. The bit is set
+// to one if the corresponding enum exists. Only works for enums with values
+// less than 64.
+template <typename T, size_t N>
+constexpr uint64_t CreateEnumBitmask(T (&values)[N]) {
+  return MakeMask(0, N, values);
+}
+
+}  // namespace
+
 namespace webrtc {
 
 ColorSpace::ColorSpace() = default;
+ColorSpace::ColorSpace(const ColorSpace& other) = default;
+ColorSpace::ColorSpace(ColorSpace&& other) = default;
+ColorSpace& ColorSpace::operator=(const ColorSpace& other) = default;
 
 ColorSpace::ColorSpace(PrimaryID primaries,
                        TransferID transfer,
                        MatrixID matrix,
                        RangeID range)
+    : ColorSpace(primaries, transfer, matrix, range, nullptr) {}
+
+ColorSpace::ColorSpace(PrimaryID primaries,
+                       TransferID transfer,
+                       MatrixID matrix,
+                       RangeID range,
+                       const HdrMetadata* hdr_metadata)
     : primaries_(primaries),
       transfer_(transfer),
       matrix_(matrix),
-      range_(range) {}
+      range_(range),
+      hdr_metadata_(hdr_metadata ? absl::make_optional(*hdr_metadata)
+                                 : absl::nullopt) {}
 
 ColorSpace::PrimaryID ColorSpace::primaries() const {
   return primaries_;
@@ -39,4 +93,61 @@
   return range_;
 }
 
+const HdrMetadata* ColorSpace::hdr_metadata() const {
+  return hdr_metadata_ ? &*hdr_metadata_ : nullptr;
+}
+
+bool ColorSpace::set_primaries_from_uint8(uint8_t enum_value) {
+  constexpr PrimaryID kPrimaryIds[] = {
+      PrimaryID::kInvalid,    PrimaryID::kBT709,      PrimaryID::kUNSPECIFIED,
+      PrimaryID::kBT470M,     PrimaryID::kBT470BG,    PrimaryID::kSMPTE170M,
+      PrimaryID::kSMPTE240M,  PrimaryID::kFILM,       PrimaryID::kBT2020,
+      PrimaryID::kSMPTEST428, PrimaryID::kSMPTEST431, PrimaryID::kSMPTEST432,
+      PrimaryID::kJEDECP22};
+  constexpr uint64_t enum_bitmask = CreateEnumBitmask(kPrimaryIds);
+
+  return SetFromUint8(enum_value, enum_bitmask, &primaries_);
+}
+
+bool ColorSpace::set_transfer_from_uint8(uint8_t enum_value) {
+  constexpr TransferID kTransferIds[] = {
+      TransferID::kInvalid,      TransferID::kBT709,
+      TransferID::kUNSPECIFIED,  TransferID::kGAMMA22,
+      TransferID::kGAMMA28,      TransferID::kSMPTE170M,
+      TransferID::kSMPTE240M,    TransferID::kLINEAR,
+      TransferID::kLOG,          TransferID::kLOG_SQRT,
+      TransferID::kIEC61966_2_4, TransferID::kBT1361_ECG,
+      TransferID::kIEC61966_2_1, TransferID::kBT2020_10,
+      TransferID::kBT2020_12,    TransferID::kSMPTEST2084,
+      TransferID::kSMPTEST428,   TransferID::kARIB_STD_B67};
+  constexpr uint64_t enum_bitmask = CreateEnumBitmask(kTransferIds);
+
+  return SetFromUint8(enum_value, enum_bitmask, &transfer_);
+}
+
+bool ColorSpace::set_matrix_from_uint8(uint8_t enum_value) {
+  constexpr MatrixID kMatrixIds[] = {
+      MatrixID::kRGB,       MatrixID::kBT709,        MatrixID::kUNSPECIFIED,
+      MatrixID::kFCC,       MatrixID::kBT470BG,      MatrixID::kSMPTE170M,
+      MatrixID::kSMPTE240M, MatrixID::kYCOCG,        MatrixID::kBT2020_NCL,
+      MatrixID::kBT2020_CL, MatrixID::kSMPTE2085,    MatrixID::kCDNCLS,
+      MatrixID::kCDCLS,     MatrixID::kBT2100_ICTCP, MatrixID::kInvalid};
+  constexpr uint64_t enum_bitmask = CreateEnumBitmask(kMatrixIds);
+
+  return SetFromUint8(enum_value, enum_bitmask, &matrix_);
+}
+
+bool ColorSpace::set_range_from_uint8(uint8_t enum_value) {
+  constexpr RangeID kRangeIds[] = {RangeID::kInvalid, RangeID::kLimited,
+                                   RangeID::kFull, RangeID::kDerived};
+  constexpr uint64_t enum_bitmask = CreateEnumBitmask(kRangeIds);
+
+  return SetFromUint8(enum_value, enum_bitmask, &range_);
+}
+
+void ColorSpace::set_hdr_metadata(const HdrMetadata* hdr_metadata) {
+  hdr_metadata_ =
+      hdr_metadata ? absl::make_optional(*hdr_metadata) : absl::nullopt;
+}
+
 }  // namespace webrtc
diff --git a/api/video/color_space.h b/api/video/color_space.h
index 8102647..79a15f5 100644
--- a/api/video/color_space.h
+++ b/api/video/color_space.h
@@ -11,95 +11,145 @@
 #ifndef API_VIDEO_COLOR_SPACE_H_
 #define API_VIDEO_COLOR_SPACE_H_
 
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/video/hdr_metadata.h"
+
 namespace webrtc {
 
-// Used to represent a color space for the purpose of color conversion. This
-// class only represents color information that can be transferred through the
-// bitstream of WebRTC's internal supported codecs:
+// This class represents color information as specified in T-REC H.273,
+// available from https://www.itu.int/rec/T-REC-H.273.
+//
+// WebRTC's supported codecs:
 // - VP9 supports color profiles, see VP9 Bitstream & Decoding Process
 // Specification Version 0.6 Section 7.2.2 "Color config semantics" available
 // from https://www.webmproject.org.
-// TODO(emircan): Extract these values from decode and add to the existing ones.
 // - VP8 only supports BT.601, see
 // https://tools.ietf.org/html/rfc6386#section-9.2
-// - H264 supports different color primaries, transfer characteristics, matrix
-// coefficients and range. See T-REC-H.264 E.2.1, "VUI parameters semantics",
-// available from https://www.itu.int/rec/T-REC-H.264.
+// - H264 uses the exact same representation as T-REC H.273. See T-REC-H.264
+// E.2.1, "VUI parameters semantics", available from
+// https://www.itu.int/rec/T-REC-H.264.
+
 class ColorSpace {
  public:
-  enum class PrimaryID {
-    kInvalid,
-    kBT709,
-    kBT470M,
-    kBT470BG,
-    kSMPTE170M,  // Identical to BT601
-    kSMPTE240M,
-    kFILM,
-    kBT2020,
-    kSMPTEST428,
-    kSMPTEST431,
-    kSMPTEST432,
-    kJEDECP22,
+  enum class PrimaryID : uint8_t {
+    // The indices are equal to the values specified in T-REC H.273 Table 2.
+    kInvalid = 0,
+    kBT709 = 1,
+    kUNSPECIFIED = 2,
+    kBT470M = 4,
+    kBT470BG = 5,
+    kSMPTE170M = 6,  // Identical to BT601
+    kSMPTE240M = 7,
+    kFILM = 8,
+    kBT2020 = 9,
+    kSMPTEST428 = 10,
+    kSMPTEST431 = 11,
+    kSMPTEST432 = 12,
+    kJEDECP22 = 22,  // Identical to EBU3213-E
+    // When adding/removing entries here, please make sure to do the
+    // corresponding change to kPrimaryIds.
   };
 
-  enum class TransferID {
-    kInvalid,
-    kBT709,
-    kGAMMA22,
-    kGAMMA28,
-    kSMPTE170M,
-    kSMPTE240M,
-    kLINEAR,
-    kLOG,
-    kLOG_SQRT,
-    kIEC61966_2_4,
-    kBT1361_ECG,
-    kIEC61966_2_1,
-    kBT2020_10,
-    kBT2020_12,
-    kSMPTEST2084,
-    kSMPTEST428,
-    kARIB_STD_B67,
+  enum class TransferID : uint8_t {
+    // The indices are equal to the values specified in T-REC H.273 Table 3.
+    kInvalid = 0,
+    kBT709 = 1,
+    kUNSPECIFIED = 2,
+    kGAMMA22 = 4,
+    kGAMMA28 = 5,
+    kSMPTE170M = 6,
+    kSMPTE240M = 7,
+    kLINEAR = 8,
+    kLOG = 9,
+    kLOG_SQRT = 10,
+    kIEC61966_2_4 = 11,
+    kBT1361_ECG = 12,
+    kIEC61966_2_1 = 13,
+    kBT2020_10 = 14,
+    kBT2020_12 = 15,
+    kSMPTEST2084 = 16,
+    kSMPTEST428 = 17,
+    kARIB_STD_B67 = 18,
+    // When adding/removing entries here, please make sure to do the
+    // corresponding change to kTransferIds.
   };
 
-  enum class MatrixID {
-    kInvalid,
-    kRGB,
-    kBT709,
-    kFCC,
-    kBT470BG,
-    kSMPTE170M,
-    kSMPTE240M,
-    kYCOCG,
-    kBT2020_NCL,
-    kBT2020_CL,
-    kSMPTE2085,
+  enum class MatrixID : uint8_t {
+    // The indices are equal to the values specified in T-REC H.273 Table 4.
+    kRGB = 0,
+    kBT709 = 1,
+    kUNSPECIFIED = 2,
+    kFCC = 4,
+    kBT470BG = 5,
+    kSMPTE170M = 6,
+    kSMPTE240M = 7,
+    kYCOCG = 8,
+    kBT2020_NCL = 9,
+    kBT2020_CL = 10,
+    kSMPTE2085 = 11,
+    kCDNCLS = 12,
+    kCDCLS = 13,
+    kBT2100_ICTCP = 14,
+    kInvalid = 63,
+    // When adding/removing entries here, please make sure to do the
+    // corresponding change to kMatrixIds.
   };
 
   enum class RangeID {
-    kInvalid,
+    // The indices are equal to the values specified at
+    // https://www.webmproject.org/docs/container/#colour for the element Range.
+    kInvalid = 0,
     // Limited Rec. 709 color range with RGB values ranging from 16 to 235.
-    kLimited,
+    kLimited = 1,
+    // Full RGB color range with RGB values from 0 to 255.
-    kFull,
+    kFull = 2,
+    // Range is defined by MatrixCoefficients/TransferCharacteristics.
+    kDerived = 3,
+    // When adding/removing entries here, please make sure to do the
+    // corresponding change to kRangeIds.
   };
 
   ColorSpace();
+  ColorSpace(const ColorSpace& other);
+  ColorSpace(ColorSpace&& other);
+  ColorSpace& operator=(const ColorSpace& other);
   ColorSpace(PrimaryID primaries,
              TransferID transfer,
              MatrixID matrix,
              RangeID full_range);
+  ColorSpace(PrimaryID primaries,
+             TransferID transfer,
+             MatrixID matrix,
+             RangeID range,
+             const HdrMetadata* hdr_metadata);
+  bool operator==(const ColorSpace& other) const {
+    return primaries_ == other.primaries() && transfer_ == other.transfer() &&
+           matrix_ == other.matrix() && range_ == other.range() &&
+           ((hdr_metadata_.has_value() && other.hdr_metadata() &&
+             *hdr_metadata_ == *other.hdr_metadata()) ||
+            (!hdr_metadata_.has_value() && other.hdr_metadata() == nullptr));
+  }
 
   PrimaryID primaries() const;
   TransferID transfer() const;
   MatrixID matrix() const;
   RangeID range() const;
+  const HdrMetadata* hdr_metadata() const;
+
+  bool set_primaries_from_uint8(uint8_t enum_value);
+  bool set_transfer_from_uint8(uint8_t enum_value);
+  bool set_matrix_from_uint8(uint8_t enum_value);
+  bool set_range_from_uint8(uint8_t enum_value);
+  void set_hdr_metadata(const HdrMetadata* hdr_metadata);
 
  private:
   PrimaryID primaries_ = PrimaryID::kInvalid;
   TransferID transfer_ = TransferID::kInvalid;
   MatrixID matrix_ = MatrixID::kInvalid;
   RangeID range_ = RangeID::kInvalid;
+  absl::optional<HdrMetadata> hdr_metadata_;
 };
 
 }  // namespace webrtc
diff --git a/api/video/encoded_image.h b/api/video/encoded_image.h
index 5c4a82d..a7c719c 100644
--- a/api/video/encoded_image.h
+++ b/api/video/encoded_image.h
@@ -14,7 +14,9 @@
 #include <stdint.h>
 
 #include "absl/types/optional.h"
+#include "api/video/color_space.h"
 #include "api/video/video_bitrate_allocation.h"
+#include "api/video/video_codec_type.h"
 #include "api/video/video_content_type.h"
 #include "api/video/video_rotation.h"
 #include "api/video/video_timing.h"
@@ -49,14 +51,20 @@
   void SetEncodeTime(int64_t encode_start_ms, int64_t encode_finish_ms);
 
   absl::optional<int> SpatialIndex() const {
-    if (spatial_index_ < 0)
-      return absl::nullopt;
     return spatial_index_;
   }
   void SetSpatialIndex(absl::optional<int> spatial_index) {
     RTC_DCHECK_GE(spatial_index.value_or(0), 0);
     RTC_DCHECK_LT(spatial_index.value_or(0), kMaxSpatialLayers);
-    spatial_index_ = spatial_index.value_or(-1);
+    spatial_index_ = spatial_index;
+  }
+
+  const webrtc::ColorSpace* ColorSpace() const {
+    return color_space_ ? &*color_space_ : nullptr;
+  }
+  void SetColorSpace(const webrtc::ColorSpace* color_space) {
+    color_space_ =
+        color_space ? absl::make_optional(*color_space) : absl::nullopt;
   }
 
   uint32_t _encodedWidth = 0;
@@ -92,9 +100,8 @@
 
  private:
   uint32_t timestamp_rtp_ = 0;
-  // -1 means not set. Use a plain int rather than optional, to keep this class
-  // copyable with memcpy.
-  int spatial_index_ = -1;
+  absl::optional<int> spatial_index_;
+  absl::optional<webrtc::ColorSpace> color_space_;
 };
 
 }  // namespace webrtc
diff --git a/api/video/hdr_metadata.cc b/api/video/hdr_metadata.cc
index bfe54ce..e2a669c 100644
--- a/api/video/hdr_metadata.cc
+++ b/api/video/hdr_metadata.cc
@@ -13,23 +13,9 @@
 namespace webrtc {
 
 HdrMasteringMetadata::Chromaticity::Chromaticity() = default;
-HdrMasteringMetadata::Chromaticity::Chromaticity(const Chromaticity& rhs) =
-    default;
-HdrMasteringMetadata::Chromaticity::Chromaticity(Chromaticity&& rhs) = default;
-HdrMasteringMetadata::Chromaticity& HdrMasteringMetadata::Chromaticity::
-operator=(const Chromaticity& rhs) = default;
 
 HdrMasteringMetadata::HdrMasteringMetadata() = default;
-HdrMasteringMetadata::HdrMasteringMetadata(const HdrMasteringMetadata& rhs) =
-    default;
-HdrMasteringMetadata::HdrMasteringMetadata(HdrMasteringMetadata&& rhs) =
-    default;
-HdrMasteringMetadata& HdrMasteringMetadata::operator=(
-    const HdrMasteringMetadata& rhs) = default;
 
 HdrMetadata::HdrMetadata() = default;
-HdrMetadata::HdrMetadata(const HdrMetadata& rhs) = default;
-HdrMetadata::HdrMetadata(HdrMetadata&& rhs) = default;
-HdrMetadata& HdrMetadata::operator=(const HdrMetadata& rhs) = default;
 
 }  // namespace webrtc
diff --git a/api/video/hdr_metadata.h b/api/video/hdr_metadata.h
index be0c173..676a900 100644
--- a/api/video/hdr_metadata.h
+++ b/api/video/hdr_metadata.h
@@ -30,9 +30,6 @@
     }
 
     Chromaticity();
-    Chromaticity(const Chromaticity& rhs);
-    Chromaticity(Chromaticity&& rhs);
-    Chromaticity& operator=(const Chromaticity& rhs);
   };
 
   // The nominal primaries of the mastering display.
@@ -54,9 +51,6 @@
   float luminance_min = 0.0f;
 
   HdrMasteringMetadata();
-  HdrMasteringMetadata(const HdrMasteringMetadata& rhs);
-  HdrMasteringMetadata(HdrMasteringMetadata&& rhs);
-  HdrMasteringMetadata& operator=(const HdrMasteringMetadata& rhs);
 
   bool operator==(const HdrMasteringMetadata& rhs) const {
     return ((primary_r == rhs.primary_r) && (primary_g == rhs.primary_g) &&
@@ -79,9 +73,6 @@
   uint32_t max_frame_average_light_level = 0;
 
   HdrMetadata();
-  HdrMetadata(const HdrMetadata& rhs);
-  HdrMetadata(HdrMetadata&& rhs);
-  HdrMetadata& operator=(const HdrMetadata& rhs);
 
   bool operator==(const HdrMetadata& rhs) const {
     return (
diff --git a/api/video/video_codec_type.h b/api/video/video_codec_type.h
new file mode 100644
index 0000000..447723c
--- /dev/null
+++ b/api/video/video_codec_type.h
@@ -0,0 +1,30 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_VIDEO_VIDEO_CODEC_TYPE_H_
+#define API_VIDEO_VIDEO_CODEC_TYPE_H_
+
+namespace webrtc {
+
+// Video codec types
+enum VideoCodecType {
+  // There are various memset(..., 0, ...) calls in the code that rely on
+  // kVideoCodecGeneric being zero.
+  kVideoCodecGeneric = 0,
+  kVideoCodecVP8,
+  kVideoCodecVP9,
+  kVideoCodecH264,
+  kVideoCodecI420,
+  kVideoCodecMultiplex,
+};
+
+}  // namespace webrtc
+
+#endif  // API_VIDEO_VIDEO_CODEC_TYPE_H_
diff --git a/api/video/video_frame.cc b/api/video/video_frame.cc
index 12da43f..eaae33b 100644
--- a/api/video/video_frame.cc
+++ b/api/video/video_frame.cc
@@ -21,7 +21,7 @@
 
 VideoFrame VideoFrame::Builder::build() {
   return VideoFrame(video_frame_buffer_, timestamp_us_, timestamp_rtp_,
-                    ntp_time_ms_, rotation_, color_space_, hdr_metadata_);
+                    ntp_time_ms_, rotation_, color_space_);
 }
 
 VideoFrame::Builder& VideoFrame::Builder::set_video_frame_buffer(
@@ -64,9 +64,10 @@
   return *this;
 }
 
-VideoFrame::Builder& VideoFrame::Builder::set_hdr_metadata(
-    const HdrMetadata& hdr_metadata) {
-  hdr_metadata_ = hdr_metadata;
+VideoFrame::Builder& VideoFrame::Builder::set_color_space(
+    const ColorSpace* color_space) {
+  color_space_ =
+      color_space ? absl::make_optional(*color_space) : absl::nullopt;
   return *this;
 }
 
@@ -96,15 +97,13 @@
                        uint32_t timestamp_rtp,
                        int64_t ntp_time_ms,
                        VideoRotation rotation,
-                       const absl::optional<ColorSpace>& color_space,
-                       const absl::optional<HdrMetadata>& hdr_metadata)
+                       const absl::optional<ColorSpace>& color_space)
     : video_frame_buffer_(buffer),
       timestamp_rtp_(timestamp_rtp),
       ntp_time_ms_(ntp_time_ms),
       timestamp_us_(timestamp_us),
       rotation_(rotation),
-      color_space_(color_space),
-      hdr_metadata_(hdr_metadata) {}
+      color_space_(color_space) {}
 
 VideoFrame::~VideoFrame() = default;
 
diff --git a/api/video/video_frame.h b/api/video/video_frame.h
index 58362b0..2c5d081 100644
--- a/api/video/video_frame.h
+++ b/api/video/video_frame.h
@@ -40,7 +40,7 @@
     Builder& set_ntp_time_ms(int64_t ntp_time_ms);
     Builder& set_rotation(VideoRotation rotation);
     Builder& set_color_space(const ColorSpace& color_space);
-    Builder& set_hdr_metadata(const HdrMetadata& hdr_metadata);
+    Builder& set_color_space(const ColorSpace* color_space);
 
    private:
     rtc::scoped_refptr<webrtc::VideoFrameBuffer> video_frame_buffer_;
@@ -49,7 +49,6 @@
     int64_t ntp_time_ms_ = 0;
     VideoRotation rotation_ = kVideoRotation_0;
     absl::optional<ColorSpace> color_space_;
-    absl::optional<HdrMetadata> hdr_metadata_;
   };
 
   // To be deprecated. Migrate all use to Builder.
@@ -116,10 +115,9 @@
   void set_rotation(VideoRotation rotation) { rotation_ = rotation; }
 
   // Get color space when available.
-  absl::optional<ColorSpace> color_space() const { return color_space_; }
-
-  // Get HDR metadata when available.
-  absl::optional<HdrMetadata> hdr_metadata() const { return hdr_metadata_; }
+  const ColorSpace* color_space() const {
+    return color_space_ ? &*color_space_ : nullptr;
+  }
 
   // Get render time in milliseconds.
   // TODO(nisse): Deprecated. Migrate all users to timestamp_us().
@@ -141,8 +139,7 @@
              uint32_t timestamp_rtp,
              int64_t ntp_time_ms,
              VideoRotation rotation,
-             const absl::optional<ColorSpace>& color_space,
-             const absl::optional<HdrMetadata>& hdr_metadata);
+             const absl::optional<ColorSpace>& color_space);
 
   // An opaque reference counted handle that stores the pixel data.
   rtc::scoped_refptr<webrtc::VideoFrameBuffer> video_frame_buffer_;
@@ -151,7 +148,6 @@
   int64_t timestamp_us_;
   VideoRotation rotation_;
   absl::optional<ColorSpace> color_space_;
-  absl::optional<HdrMetadata> hdr_metadata_;
 };
 
 }  // namespace webrtc
diff --git a/audio/BUILD.gn b/audio/BUILD.gn
index 4b2ec61..c045af6 100644
--- a/audio/BUILD.gn
+++ b/audio/BUILD.gn
@@ -26,19 +26,13 @@
     "audio_transport_impl.h",
     "channel_receive.cc",
     "channel_receive.h",
-    "channel_receive_proxy.cc",
-    "channel_receive_proxy.h",
     "channel_send.cc",
     "channel_send.h",
-    "channel_send_proxy.cc",
-    "channel_send_proxy.h",
     "conversion.h",
     "null_audio_poller.cc",
     "null_audio_poller.h",
     "remix_resample.cc",
     "remix_resample.h",
-    "time_interval.cc",
-    "time_interval.h",
     "transport_feedback_packet_loss_tracker.cc",
     "transport_feedback_packet_loss_tracker.h",
   ]
@@ -49,7 +43,6 @@
   }
 
   deps = [
-    "..:webrtc_common",
     "../api:array_view",
     "../api:call_api",
     "../api:libjingle_peerconnection_api",
@@ -131,7 +124,6 @@
       "remix_resample_unittest.cc",
       "test/audio_stats_test.cc",
       "test/media_transport_test.cc",
-      "time_interval_unittest.cc",
       "transport_feedback_packet_loss_tracker_unittest.cc",
     ]
     deps = [
@@ -155,6 +147,7 @@
       "../logging:mocks",
       "../logging:rtc_event_log_api",
       "../modules/audio_device:mock_audio_device",
+      "../rtc_base:rtc_base_tests_utils",
 
       # For TestAudioDeviceModule
       "../modules/audio_device:audio_device_impl",
@@ -168,7 +161,6 @@
       "../modules/utility",
       "../rtc_base:checks",
       "../rtc_base:rtc_base_approved",
-      "../rtc_base:rtc_base_tests_utils",
       "../rtc_base:rtc_task_queue",
       "../rtc_base:safe_compare",
       "../system_wrappers:system_wrappers",
diff --git a/audio/audio_receive_stream.cc b/audio/audio_receive_stream.cc
index 4f2e29c..8d4afe0 100644
--- a/audio/audio_receive_stream.cc
+++ b/audio/audio_receive_stream.cc
@@ -21,11 +21,9 @@
 #include "audio/audio_send_stream.h"
 #include "audio/audio_state.h"
 #include "audio/channel_receive.h"
-#include "audio/channel_receive_proxy.h"
 #include "audio/conversion.h"
 #include "call/rtp_config.h"
 #include "call/rtp_stream_receiver_controller_interface.h"
-#include "common_types.h"  // NOLINT(build/include)
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/strings/string_builder.h"
@@ -68,7 +66,7 @@
 
 namespace internal {
 namespace {
-std::unique_ptr<voe::ChannelReceiveProxy> CreateChannelAndProxy(
+std::unique_ptr<voe::ChannelReceiveInterface> CreateChannelReceive(
     webrtc::AudioState* audio_state,
     ProcessThread* module_process_thread,
     const webrtc::AudioReceiveStream::Config& config,
@@ -76,13 +74,13 @@
   RTC_DCHECK(audio_state);
   internal::AudioState* internal_audio_state =
       static_cast<internal::AudioState*>(audio_state);
-  return absl::make_unique<voe::ChannelReceiveProxy>(
-      absl::make_unique<voe::ChannelReceive>(
-          module_process_thread, internal_audio_state->audio_device_module(),
-          config.media_transport, config.rtcp_send_transport, event_log,
-          config.rtp.remote_ssrc, config.jitter_buffer_max_packets,
-          config.jitter_buffer_fast_accelerate, config.decoder_factory,
-          config.codec_pair_id, config.frame_decryptor, config.crypto_options));
+  return voe::CreateChannelReceive(
+      module_process_thread, internal_audio_state->audio_device_module(),
+      config.media_transport, config.rtcp_send_transport, event_log,
+      config.rtp.remote_ssrc, config.jitter_buffer_max_packets,
+      config.jitter_buffer_fast_accelerate, config.jitter_buffer_min_delay_ms,
+      config.decoder_factory, config.codec_pair_id, config.frame_decryptor,
+      config.crypto_options);
 }
 }  // namespace
 
@@ -98,10 +96,10 @@
                          config,
                          audio_state,
                          event_log,
-                         CreateChannelAndProxy(audio_state.get(),
-                                               module_process_thread,
-                                               config,
-                                               event_log)) {}
+                         CreateChannelReceive(audio_state.get(),
+                                              module_process_thread,
+                                              config,
+                                              event_log)) {}
 
 AudioReceiveStream::AudioReceiveStream(
     RtpStreamReceiverControllerInterface* receiver_controller,
@@ -109,13 +107,13 @@
     const webrtc::AudioReceiveStream::Config& config,
     const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
     webrtc::RtcEventLog* event_log,
-    std::unique_ptr<voe::ChannelReceiveProxy> channel_proxy)
-    : audio_state_(audio_state), channel_proxy_(std::move(channel_proxy)) {
+    std::unique_ptr<voe::ChannelReceiveInterface> channel_receive)
+    : audio_state_(audio_state), channel_receive_(std::move(channel_receive)) {
   RTC_LOG(LS_INFO) << "AudioReceiveStream: " << config.rtp.remote_ssrc;
   RTC_DCHECK(config.decoder_factory);
   RTC_DCHECK(config.rtcp_send_transport);
   RTC_DCHECK(audio_state_);
-  RTC_DCHECK(channel_proxy_);
+  RTC_DCHECK(channel_receive_);
 
   module_process_thread_checker_.DetachFromThread();
 
@@ -123,11 +121,11 @@
     RTC_DCHECK(receiver_controller);
     RTC_DCHECK(packet_router);
     // Configure bandwidth estimation.
-    channel_proxy_->RegisterReceiverCongestionControlObjects(packet_router);
+    channel_receive_->RegisterReceiverCongestionControlObjects(packet_router);
 
     // Register with transport.
     rtp_stream_receiver_ = receiver_controller->CreateReceiver(
-        config.rtp.remote_ssrc, channel_proxy_.get());
+        config.rtp.remote_ssrc, channel_receive_.get());
   }
   ConfigureStream(this, config, true);
 }
@@ -136,9 +134,9 @@
   RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   RTC_LOG(LS_INFO) << "~AudioReceiveStream: " << config_.rtp.remote_ssrc;
   Stop();
-  channel_proxy_->DisassociateSendChannel();
+  channel_receive_->SetAssociatedSendChannel(nullptr);
   if (!config_.media_transport) {
-    channel_proxy_->ResetReceiverCongestionControlObjects();
+    channel_receive_->ResetReceiverCongestionControlObjects();
   }
 }
 
@@ -153,7 +151,7 @@
   if (playing_) {
     return;
   }
-  channel_proxy_->StartPlayout();
+  channel_receive_->StartPlayout();
   playing_ = true;
   audio_state()->AddReceivingStream(this);
 }
@@ -163,7 +161,7 @@
   if (!playing_) {
     return;
   }
-  channel_proxy_->StopPlayout();
+  channel_receive_->StopPlayout();
   playing_ = false;
   audio_state()->RemoveReceivingStream(this);
 }
@@ -174,11 +172,11 @@
   stats.remote_ssrc = config_.rtp.remote_ssrc;
 
   webrtc::CallReceiveStatistics call_stats =
-      channel_proxy_->GetRTCPStatistics();
+      channel_receive_->GetRTCPStatistics();
   // TODO(solenberg): Don't return here if we can't get the codec - return the
   //                  stats we *can* get.
   webrtc::CodecInst codec_inst = {0};
-  if (!channel_proxy_->GetRecCodec(&codec_inst)) {
+  if (!channel_receive_->GetRecCodec(&codec_inst)) {
     return stats;
   }
 
@@ -195,13 +193,13 @@
   if (codec_inst.plfreq / 1000 > 0) {
     stats.jitter_ms = call_stats.jitterSamples / (codec_inst.plfreq / 1000);
   }
-  stats.delay_estimate_ms = channel_proxy_->GetDelayEstimate();
-  stats.audio_level = channel_proxy_->GetSpeechOutputLevelFullRange();
-  stats.total_output_energy = channel_proxy_->GetTotalOutputEnergy();
-  stats.total_output_duration = channel_proxy_->GetTotalOutputDuration();
+  stats.delay_estimate_ms = channel_receive_->GetDelayEstimate();
+  stats.audio_level = channel_receive_->GetSpeechOutputLevelFullRange();
+  stats.total_output_energy = channel_receive_->GetTotalOutputEnergy();
+  stats.total_output_duration = channel_receive_->GetTotalOutputDuration();
 
   // Get jitter buffer and total delay (alg + jitter + playout) stats.
-  auto ns = channel_proxy_->GetNetworkStatistics();
+  auto ns = channel_receive_->GetNetworkStatistics();
   stats.jitter_buffer_ms = ns.currentBufferSize;
   stats.jitter_buffer_preferred_ms = ns.preferredBufferSize;
   stats.total_samples_received = ns.totalSamplesReceived;
@@ -216,8 +214,10 @@
   stats.secondary_discarded_rate = Q14ToFloat(ns.currentSecondaryDiscardedRate);
   stats.accelerate_rate = Q14ToFloat(ns.currentAccelerateRate);
   stats.preemptive_expand_rate = Q14ToFloat(ns.currentPreemptiveRate);
+  stats.jitter_buffer_flushes = ns.packetBufferFlushes;
+  stats.delayed_packet_outage_samples = ns.delayedPacketOutageSamples;
 
-  auto ds = channel_proxy_->GetDecodingCallStatistics();
+  auto ds = channel_receive_->GetDecodingCallStatistics();
   stats.decoding_calls_to_silence_generator = ds.calls_to_silence_generator;
   stats.decoding_calls_to_neteq = ds.calls_to_neteq;
   stats.decoding_normal = ds.decoded_normal;
@@ -231,23 +231,23 @@
 
 void AudioReceiveStream::SetSink(AudioSinkInterface* sink) {
   RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-  channel_proxy_->SetSink(sink);
+  channel_receive_->SetSink(sink);
 }
 
 void AudioReceiveStream::SetGain(float gain) {
   RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-  channel_proxy_->SetChannelOutputVolumeScaling(gain);
+  channel_receive_->SetChannelOutputVolumeScaling(gain);
 }
 
 std::vector<RtpSource> AudioReceiveStream::GetSources() const {
   RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-  return channel_proxy_->GetSources();
+  return channel_receive_->GetSources();
 }
 
 AudioMixer::Source::AudioFrameInfo AudioReceiveStream::GetAudioFrameWithInfo(
     int sample_rate_hz,
     AudioFrame* audio_frame) {
-  return channel_proxy_->GetAudioFrameWithInfo(sample_rate_hz, audio_frame);
+  return channel_receive_->GetAudioFrameWithInfo(sample_rate_hz, audio_frame);
 }
 
 int AudioReceiveStream::Ssrc() const {
@@ -255,7 +255,7 @@
 }
 
 int AudioReceiveStream::PreferredSampleRate() const {
-  return channel_proxy_->PreferredSampleRate();
+  return channel_receive_->PreferredSampleRate();
 }
 
 int AudioReceiveStream::id() const {
@@ -265,32 +265,29 @@
 
 absl::optional<Syncable::Info> AudioReceiveStream::GetInfo() const {
   RTC_DCHECK_RUN_ON(&module_process_thread_checker_);
-  absl::optional<Syncable::Info> info = channel_proxy_->GetSyncInfo();
+  absl::optional<Syncable::Info> info = channel_receive_->GetSyncInfo();
 
   if (!info)
     return absl::nullopt;
 
-  info->current_delay_ms = channel_proxy_->GetDelayEstimate();
+  info->current_delay_ms = channel_receive_->GetDelayEstimate();
   return info;
 }
 
 uint32_t AudioReceiveStream::GetPlayoutTimestamp() const {
   // Called on video capture thread.
-  return channel_proxy_->GetPlayoutTimestamp();
+  return channel_receive_->GetPlayoutTimestamp();
 }
 
 void AudioReceiveStream::SetMinimumPlayoutDelay(int delay_ms) {
   RTC_DCHECK_RUN_ON(&module_process_thread_checker_);
-  return channel_proxy_->SetMinimumPlayoutDelay(delay_ms);
+  return channel_receive_->SetMinimumPlayoutDelay(delay_ms);
 }
 
 void AudioReceiveStream::AssociateSendStream(AudioSendStream* send_stream) {
   RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-  if (send_stream) {
-    channel_proxy_->AssociateSendChannel(send_stream->GetChannelProxy());
-  } else {
-    channel_proxy_->DisassociateSendChannel();
-  }
+  channel_receive_->SetAssociatedSendChannel(
+      send_stream ? send_stream->GetChannel() : nullptr);
   associated_send_stream_ = send_stream;
 }
 
@@ -303,7 +300,7 @@
   // calls on the worker thread. We should move towards always using a network
   // thread. Then this check can be enabled.
   // RTC_DCHECK(!thread_checker_.CalledOnValidThread());
-  return channel_proxy_->ReceivedRTCPPacket(packet, length);
+  return channel_receive_->ReceivedRTCPPacket(packet, length);
 }
 
 void AudioReceiveStream::OnRtpPacket(const RtpPacketReceived& packet) {
@@ -311,7 +308,7 @@
   // calls on the worker thread. We should move towards always using a network
   // thread. Then this check can be enabled.
   // RTC_DCHECK(!thread_checker_.CalledOnValidThread());
-  channel_proxy_->OnRtpPacket(packet);
+  channel_receive_->OnRtpPacket(packet);
 }
 
 const webrtc::AudioReceiveStream::Config& AudioReceiveStream::config() const {
@@ -337,7 +334,7 @@
   RTC_LOG(LS_INFO) << "AudioReceiveStream::ConfigureStream: "
                    << new_config.ToString();
   RTC_DCHECK(stream);
-  const auto& channel_proxy = stream->channel_proxy_;
+  const auto& channel_receive = stream->channel_receive_;
   const auto& old_config = stream->config_;
 
   // Configuration parameters which cannot be changed.
@@ -351,7 +348,7 @@
              old_config.decoder_factory == new_config.decoder_factory);
 
   if (first_time || old_config.rtp.local_ssrc != new_config.rtp.local_ssrc) {
-    channel_proxy->SetLocalSSRC(new_config.rtp.local_ssrc);
+    channel_receive->SetLocalSSRC(new_config.rtp.local_ssrc);
   }
 
   if (!first_time) {
@@ -363,11 +360,11 @@
   // using the actual packet size for the configured codec.
   if (first_time || old_config.rtp.nack.rtp_history_ms !=
                         new_config.rtp.nack.rtp_history_ms) {
-    channel_proxy->SetNACKStatus(new_config.rtp.nack.rtp_history_ms != 0,
-                                 new_config.rtp.nack.rtp_history_ms / 20);
+    channel_receive->SetNACKStatus(new_config.rtp.nack.rtp_history_ms != 0,
+                                   new_config.rtp.nack.rtp_history_ms / 20);
   }
   if (first_time || old_config.decoder_map != new_config.decoder_map) {
-    channel_proxy->SetReceiveCodecs(new_config.decoder_map);
+    channel_receive->SetReceiveCodecs(new_config.decoder_map);
   }
 
   stream->config_ = new_config;
diff --git a/audio/audio_receive_stream.h b/audio/audio_receive_stream.h
index dde0da4..86bcb1c 100644
--- a/audio/audio_receive_stream.h
+++ b/audio/audio_receive_stream.h
@@ -31,7 +31,7 @@
 class RtpStreamReceiverInterface;
 
 namespace voe {
-class ChannelReceiveProxy;
+class ChannelReceiveInterface;
 }  // namespace voe
 
 namespace internal {
@@ -47,13 +47,14 @@
                      const webrtc::AudioReceiveStream::Config& config,
                      const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
                      webrtc::RtcEventLog* event_log);
-  // For unit tests, which need to supply a mock channel proxy.
-  AudioReceiveStream(RtpStreamReceiverControllerInterface* receiver_controller,
-                     PacketRouter* packet_router,
-                     const webrtc::AudioReceiveStream::Config& config,
-                     const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
-                     webrtc::RtcEventLog* event_log,
-                     std::unique_ptr<voe::ChannelReceiveProxy> channel_proxy);
+  // For unit tests, which need to supply a mock channel receive.
+  AudioReceiveStream(
+      RtpStreamReceiverControllerInterface* receiver_controller,
+      PacketRouter* packet_router,
+      const webrtc::AudioReceiveStream::Config& config,
+      const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
+      webrtc::RtcEventLog* event_log,
+      std::unique_ptr<voe::ChannelReceiveInterface> channel_receive);
   ~AudioReceiveStream() override;
 
   // webrtc::AudioReceiveStream implementation.
@@ -100,7 +101,7 @@
   rtc::ThreadChecker module_process_thread_checker_;
   webrtc::AudioReceiveStream::Config config_;
   rtc::scoped_refptr<webrtc::AudioState> audio_state_;
-  std::unique_ptr<voe::ChannelReceiveProxy> channel_proxy_;
+  const std::unique_ptr<voe::ChannelReceiveInterface> channel_receive_;
   AudioSendStream* associated_send_stream_ = nullptr;
 
   bool playing_ RTC_GUARDED_BY(worker_thread_checker_) = false;
diff --git a/audio/audio_receive_stream_unittest.cc b/audio/audio_receive_stream_unittest.cc
index a5c7e20..7422810 100644
--- a/audio/audio_receive_stream_unittest.cc
+++ b/audio/audio_receive_stream_unittest.cc
@@ -83,16 +83,16 @@
         new rtc::RefCountedObject<testing::NiceMock<MockAudioDeviceModule>>();
     audio_state_ = AudioState::Create(config);
 
-    channel_proxy_ = new testing::StrictMock<MockChannelReceiveProxy>();
-    EXPECT_CALL(*channel_proxy_, SetLocalSSRC(kLocalSsrc)).Times(1);
-    EXPECT_CALL(*channel_proxy_, SetNACKStatus(true, 15)).Times(1);
-    EXPECT_CALL(*channel_proxy_,
+    channel_receive_ = new testing::StrictMock<MockChannelReceive>();
+    EXPECT_CALL(*channel_receive_, SetLocalSSRC(kLocalSsrc)).Times(1);
+    EXPECT_CALL(*channel_receive_, SetNACKStatus(true, 15)).Times(1);
+    EXPECT_CALL(*channel_receive_,
                 RegisterReceiverCongestionControlObjects(&packet_router_))
         .Times(1);
-    EXPECT_CALL(*channel_proxy_, ResetReceiverCongestionControlObjects())
+    EXPECT_CALL(*channel_receive_, ResetReceiverCongestionControlObjects())
         .Times(1);
-    EXPECT_CALL(*channel_proxy_, DisassociateSendChannel()).Times(1);
-    EXPECT_CALL(*channel_proxy_, SetReceiveCodecs(_))
+    EXPECT_CALL(*channel_receive_, SetAssociatedSendChannel(nullptr)).Times(1);
+    EXPECT_CALL(*channel_receive_, SetReceiveCodecs(_))
         .WillRepeatedly(Invoke([](const std::map<int, SdpAudioFormat>& codecs) {
           EXPECT_THAT(codecs, testing::IsEmpty());
         }));
@@ -114,33 +114,33 @@
         new internal::AudioReceiveStream(
             &rtp_stream_receiver_controller_, &packet_router_, stream_config_,
             audio_state_, &event_log_,
-            std::unique_ptr<voe::ChannelReceiveProxy>(channel_proxy_)));
+            std::unique_ptr<voe::ChannelReceiveInterface>(channel_receive_)));
   }
 
   AudioReceiveStream::Config& config() { return stream_config_; }
   rtc::scoped_refptr<MockAudioMixer> audio_mixer() { return audio_mixer_; }
-  MockChannelReceiveProxy* channel_proxy() { return channel_proxy_; }
+  MockChannelReceive* channel_receive() { return channel_receive_; }
 
   void SetupMockForGetStats() {
     using testing::DoAll;
     using testing::SetArgPointee;
 
-    ASSERT_TRUE(channel_proxy_);
-    EXPECT_CALL(*channel_proxy_, GetRTCPStatistics())
+    ASSERT_TRUE(channel_receive_);
+    EXPECT_CALL(*channel_receive_, GetRTCPStatistics())
         .WillOnce(Return(kCallStats));
-    EXPECT_CALL(*channel_proxy_, GetDelayEstimate())
+    EXPECT_CALL(*channel_receive_, GetDelayEstimate())
         .WillOnce(Return(kJitterBufferDelay + kPlayoutBufferDelay));
-    EXPECT_CALL(*channel_proxy_, GetSpeechOutputLevelFullRange())
+    EXPECT_CALL(*channel_receive_, GetSpeechOutputLevelFullRange())
         .WillOnce(Return(kSpeechOutputLevel));
-    EXPECT_CALL(*channel_proxy_, GetTotalOutputEnergy())
+    EXPECT_CALL(*channel_receive_, GetTotalOutputEnergy())
         .WillOnce(Return(kTotalOutputEnergy));
-    EXPECT_CALL(*channel_proxy_, GetTotalOutputDuration())
+    EXPECT_CALL(*channel_receive_, GetTotalOutputDuration())
         .WillOnce(Return(kTotalOutputDuration));
-    EXPECT_CALL(*channel_proxy_, GetNetworkStatistics())
+    EXPECT_CALL(*channel_receive_, GetNetworkStatistics())
         .WillOnce(Return(kNetworkStats));
-    EXPECT_CALL(*channel_proxy_, GetDecodingCallStatistics())
+    EXPECT_CALL(*channel_receive_, GetDecodingCallStatistics())
         .WillOnce(Return(kAudioDecodeStats));
-    EXPECT_CALL(*channel_proxy_, GetRecCodec(_))
+    EXPECT_CALL(*channel_receive_, GetRecCodec(_))
         .WillOnce(DoAll(SetArgPointee<0>(kCodecInst), Return(true)));
   }
 
@@ -150,7 +150,7 @@
   rtc::scoped_refptr<AudioState> audio_state_;
   rtc::scoped_refptr<MockAudioMixer> audio_mixer_;
   AudioReceiveStream::Config stream_config_;
-  testing::StrictMock<MockChannelReceiveProxy>* channel_proxy_ = nullptr;
+  testing::StrictMock<MockChannelReceive>* channel_receive_ = nullptr;
   RtpStreamReceiverController rtp_stream_receiver_controller_;
   MockTransport rtcp_send_transport_;
 };
@@ -239,7 +239,7 @@
   ASSERT_TRUE(parsed_packet.Parse(&rtp_packet[0], rtp_packet.size()));
   parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000);
 
-  EXPECT_CALL(*helper.channel_proxy(),
+  EXPECT_CALL(*helper.channel_receive(),
               OnRtpPacket(testing::Ref(parsed_packet)));
 
   recv_stream->OnRtpPacket(parsed_packet);
@@ -250,7 +250,7 @@
   helper.config().rtp.transport_cc = true;
   auto recv_stream = helper.CreateAudioReceiveStream();
   std::vector<uint8_t> rtcp_packet = CreateRtcpSenderReport();
-  EXPECT_CALL(*helper.channel_proxy(),
+  EXPECT_CALL(*helper.channel_receive(),
               ReceivedRTCPPacket(&rtcp_packet[0], rtcp_packet.size()))
       .WillOnce(Return(true));
   EXPECT_TRUE(recv_stream->DeliverRtcp(&rtcp_packet[0], rtcp_packet.size()));
@@ -312,7 +312,7 @@
 TEST(AudioReceiveStreamTest, SetGain) {
   ConfigHelper helper;
   auto recv_stream = helper.CreateAudioReceiveStream();
-  EXPECT_CALL(*helper.channel_proxy(),
+  EXPECT_CALL(*helper.channel_receive(),
               SetChannelOutputVolumeScaling(FloatEq(0.765f)));
   recv_stream->SetGain(0.765f);
 }
@@ -323,10 +323,10 @@
   auto recv_stream1 = helper1.CreateAudioReceiveStream();
   auto recv_stream2 = helper2.CreateAudioReceiveStream();
 
-  EXPECT_CALL(*helper1.channel_proxy(), StartPlayout()).Times(1);
-  EXPECT_CALL(*helper2.channel_proxy(), StartPlayout()).Times(1);
-  EXPECT_CALL(*helper1.channel_proxy(), StopPlayout()).Times(1);
-  EXPECT_CALL(*helper2.channel_proxy(), StopPlayout()).Times(1);
+  EXPECT_CALL(*helper1.channel_receive(), StartPlayout()).Times(1);
+  EXPECT_CALL(*helper2.channel_receive(), StartPlayout()).Times(1);
+  EXPECT_CALL(*helper1.channel_receive(), StopPlayout()).Times(1);
+  EXPECT_CALL(*helper2.channel_receive(), StopPlayout()).Times(1);
   EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream1.get()))
       .WillOnce(Return(true));
   EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream2.get()))
@@ -367,10 +367,10 @@
                    kTransportSequenceNumberId + 1));
   new_config.decoder_map.emplace(1, SdpAudioFormat("foo", 8000, 1));
 
-  MockChannelReceiveProxy& channel_proxy = *helper.channel_proxy();
-  EXPECT_CALL(channel_proxy, SetLocalSSRC(kLocalSsrc + 1)).Times(1);
-  EXPECT_CALL(channel_proxy, SetNACKStatus(true, 15 + 1)).Times(1);
-  EXPECT_CALL(channel_proxy, SetReceiveCodecs(new_config.decoder_map));
+  MockChannelReceive& channel_receive = *helper.channel_receive();
+  EXPECT_CALL(channel_receive, SetLocalSSRC(kLocalSsrc + 1)).Times(1);
+  EXPECT_CALL(channel_receive, SetNACKStatus(true, 15 + 1)).Times(1);
+  EXPECT_CALL(channel_receive, SetReceiveCodecs(new_config.decoder_map));
 
   recv_stream->Reconfigure(new_config);
 }
diff --git a/audio/audio_send_stream.cc b/audio/audio_send_stream.cc
index 37f89c5..75e6efb 100644
--- a/audio/audio_send_stream.cc
+++ b/audio/audio_send_stream.cc
@@ -22,12 +22,10 @@
 #include "api/crypto/frameencryptorinterface.h"
 #include "audio/audio_state.h"
 #include "audio/channel_send.h"
-#include "audio/channel_send_proxy.h"
 #include "audio/conversion.h"
 #include "call/rtp_config.h"
 #include "call/rtp_transport_controller_send_interface.h"
 #include "common_audio/vad/include/vad.h"
-#include "common_types.h"  // NOLINT(build/include)
 #include "logging/rtc_event_log/events/rtc_event_audio_send_stream_config.h"
 #include "logging/rtc_event_log/rtc_event_log.h"
 #include "logging/rtc_event_log/rtc_stream_config.h"
@@ -50,29 +48,14 @@
 constexpr size_t kPacketLossRateMinNumAckedPackets = 50;
 constexpr size_t kRecoverablePacketLossRateMinNumAckedPairs = 40;
 
-void CallEncoder(const std::unique_ptr<voe::ChannelSendProxy>& channel_proxy,
+void CallEncoder(const std::unique_ptr<voe::ChannelSendInterface>& channel_send,
                  rtc::FunctionView<void(AudioEncoder*)> lambda) {
-  channel_proxy->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder_ptr) {
+  channel_send->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder_ptr) {
     RTC_DCHECK(encoder_ptr);
     lambda(encoder_ptr->get());
   });
 }
 
-std::unique_ptr<voe::ChannelSendProxy> CreateChannelAndProxy(
-    rtc::TaskQueue* worker_queue,
-    ProcessThread* module_process_thread,
-    MediaTransportInterface* media_transport,
-    RtcpRttStats* rtcp_rtt_stats,
-    RtcEventLog* event_log,
-    FrameEncryptorInterface* frame_encryptor,
-    const webrtc::CryptoOptions& crypto_options,
-    bool extmap_allow_mixed) {
-  return absl::make_unique<voe::ChannelSendProxy>(
-      absl::make_unique<voe::ChannelSend>(
-          worker_queue, module_process_thread, media_transport, rtcp_rtt_stats,
-          event_log, frame_encryptor, crypto_options, extmap_allow_mixed));
-}
-
 void UpdateEventLogStreamConfig(RtcEventLog* event_log,
                                 const AudioSendStream::Config& config,
                                 const AudioSendStream::Config* old_config) {
@@ -107,29 +90,6 @@
 
 }  // namespace
 
-// Helper class to track the actively sending lifetime of this stream.
-class AudioSendStream::TimedTransport : public Transport {
- public:
-  TimedTransport(Transport* transport, TimeInterval* time_interval)
-      : transport_(transport), lifetime_(time_interval) {}
-  bool SendRtp(const uint8_t* packet,
-               size_t length,
-               const PacketOptions& options) {
-    if (lifetime_) {
-      lifetime_->Extend();
-    }
-    return transport_->SendRtp(packet, length, options);
-  }
-  bool SendRtcp(const uint8_t* packet, size_t length) {
-    return transport_->SendRtcp(packet, length);
-  }
-  ~TimedTransport() {}
-
- private:
-  Transport* transport_;
-  TimeInterval* lifetime_;
-};
-
 AudioSendStream::AudioSendStream(
     const webrtc::AudioSendStream::Config& config,
     const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
@@ -139,8 +99,7 @@
     BitrateAllocatorInterface* bitrate_allocator,
     RtcEventLog* event_log,
     RtcpRttStats* rtcp_rtt_stats,
-    const absl::optional<RtpState>& suspended_rtp_state,
-    TimeInterval* overall_call_lifetime)
+    const absl::optional<RtpState>& suspended_rtp_state)
     : AudioSendStream(config,
                       audio_state,
                       worker_queue,
@@ -149,15 +108,16 @@
                       event_log,
                       rtcp_rtt_stats,
                       suspended_rtp_state,
-                      overall_call_lifetime,
-                      CreateChannelAndProxy(worker_queue,
-                                            module_process_thread,
-                                            config.media_transport,
-                                            rtcp_rtt_stats,
-                                            event_log,
-                                            config.frame_encryptor,
-                                            config.crypto_options,
-                                            config.rtp.extmap_allow_mixed)) {}
+                      voe::CreateChannelSend(worker_queue,
+                                             module_process_thread,
+                                             config.media_transport,
+                                             config.send_transport,
+                                             rtcp_rtt_stats,
+                                             event_log,
+                                             config.frame_encryptor,
+                                             config.crypto_options,
+                                             config.rtp.extmap_allow_mixed,
+                                             config.rtcp_report_interval_ms)) {}
 
 AudioSendStream::AudioSendStream(
     const webrtc::AudioSendStream::Config& config,
@@ -168,13 +128,12 @@
     RtcEventLog* event_log,
     RtcpRttStats* rtcp_rtt_stats,
     const absl::optional<RtpState>& suspended_rtp_state,
-    TimeInterval* overall_call_lifetime,
-    std::unique_ptr<voe::ChannelSendProxy> channel_proxy)
+    std::unique_ptr<voe::ChannelSendInterface> channel_send)
     : worker_queue_(worker_queue),
       config_(Config(/*send_transport=*/nullptr,
                      /*media_transport=*/nullptr)),
       audio_state_(audio_state),
-      channel_proxy_(std::move(channel_proxy)),
+      channel_send_(std::move(channel_send)),
       event_log_(event_log),
       bitrate_allocator_(bitrate_allocator),
       rtp_transport_(rtp_transport),
@@ -182,22 +141,19 @@
                            kPacketLossRateMinNumAckedPackets,
                            kRecoverablePacketLossRateMinNumAckedPairs),
       rtp_rtcp_module_(nullptr),
-      suspended_rtp_state_(suspended_rtp_state),
-      overall_call_lifetime_(overall_call_lifetime) {
+      suspended_rtp_state_(suspended_rtp_state) {
   RTC_LOG(LS_INFO) << "AudioSendStream: " << config.rtp.ssrc;
   RTC_DCHECK(worker_queue_);
   RTC_DCHECK(audio_state_);
-  RTC_DCHECK(channel_proxy_);
+  RTC_DCHECK(channel_send_);
   RTC_DCHECK(bitrate_allocator_);
   // TODO(nisse): Eventually, we should have only media_transport. But for the
   // time being, we can have either. When media transport is injected, there
   // should be no rtp_transport, and below check should be strengthened to XOR
   // (either rtp_transport or media_transport but not both).
   RTC_DCHECK(rtp_transport || config.media_transport);
-  RTC_DCHECK(overall_call_lifetime_);
 
-  channel_proxy_->SetRTCPStatus(true);
-  rtp_rtcp_module_ = channel_proxy_->GetRtpRtcp();
+  rtp_rtcp_module_ = channel_send_->GetRtpRtcp();
   RTC_DCHECK(rtp_rtcp_module_);
 
   ConfigureStream(this, config, true);
@@ -216,13 +172,8 @@
   RTC_DCHECK(!sending_);
   if (rtp_transport_) {
     rtp_transport_->DeRegisterPacketFeedbackObserver(this);
-    channel_proxy_->RegisterTransport(nullptr);
-    channel_proxy_->ResetSenderCongestionControlObjects();
+    channel_send_->ResetSenderCongestionControlObjects();
   }
-  // Lifetime can only be updated after deregistering
-  // |timed_send_transport_adapter_| in the underlying channel object to avoid
-  // data races in |active_lifetime_|.
-  overall_call_lifetime_->Extend(active_lifetime_);
 }
 
 const webrtc::AudioSendStream::Config& AudioSendStream::GetConfig() const {
@@ -260,56 +211,39 @@
   UpdateEventLogStreamConfig(stream->event_log_, new_config,
                              first_time ? nullptr : &stream->config_);
 
-  const auto& channel_proxy = stream->channel_proxy_;
+  const auto& channel_send = stream->channel_send_;
   const auto& old_config = stream->config_;
 
+  // Configuration parameters which cannot be changed.
+  RTC_DCHECK(first_time ||
+             old_config.send_transport == new_config.send_transport);
+
   if (first_time || old_config.rtp.ssrc != new_config.rtp.ssrc) {
-    channel_proxy->SetLocalSSRC(new_config.rtp.ssrc);
+    channel_send->SetLocalSSRC(new_config.rtp.ssrc);
     if (stream->suspended_rtp_state_) {
       stream->rtp_rtcp_module_->SetRtpState(*stream->suspended_rtp_state_);
     }
   }
   if (first_time || old_config.rtp.c_name != new_config.rtp.c_name) {
-    channel_proxy->SetRTCP_CNAME(new_config.rtp.c_name);
-  }
-  // TODO(solenberg): Config NACK history window (which is a packet count),
-  // using the actual packet size for the configured codec.
-  if (first_time || old_config.rtp.nack.rtp_history_ms !=
-                        new_config.rtp.nack.rtp_history_ms) {
-    channel_proxy->SetNACKStatus(new_config.rtp.nack.rtp_history_ms != 0,
-                                 new_config.rtp.nack.rtp_history_ms / 20);
-  }
-
-  if (first_time || new_config.send_transport != old_config.send_transport) {
-    if (old_config.send_transport) {
-      channel_proxy->RegisterTransport(nullptr);
-    }
-    if (new_config.send_transport) {
-      stream->timed_send_transport_adapter_.reset(new TimedTransport(
-          new_config.send_transport, &stream->active_lifetime_));
-    } else {
-      stream->timed_send_transport_adapter_.reset(nullptr);
-    }
-    channel_proxy->RegisterTransport(
-        stream->timed_send_transport_adapter_.get());
+    channel_send->SetRTCP_CNAME(new_config.rtp.c_name);
   }
 
   // Enable the frame encryptor if a new frame encryptor has been provided.
   if (first_time || new_config.frame_encryptor != old_config.frame_encryptor) {
-    channel_proxy->SetFrameEncryptor(new_config.frame_encryptor);
+    channel_send->SetFrameEncryptor(new_config.frame_encryptor);
   }
 
   if (first_time ||
       new_config.rtp.extmap_allow_mixed != old_config.rtp.extmap_allow_mixed) {
-    channel_proxy->SetExtmapAllowMixed(new_config.rtp.extmap_allow_mixed);
+    channel_send->SetExtmapAllowMixed(new_config.rtp.extmap_allow_mixed);
   }
 
   const ExtensionIds old_ids = FindExtensionIds(old_config.rtp.extensions);
   const ExtensionIds new_ids = FindExtensionIds(new_config.rtp.extensions);
   // Audio level indication
   if (first_time || new_ids.audio_level != old_ids.audio_level) {
-    channel_proxy->SetSendAudioLevelIndicationStatus(new_ids.audio_level != 0,
-                                                     new_ids.audio_level);
+    channel_send->SetSendAudioLevelIndicationStatus(new_ids.audio_level != 0,
+                                                    new_ids.audio_level);
   }
   bool transport_seq_num_id_changed =
       new_ids.transport_sequence_number != old_ids.transport_sequence_number;
@@ -317,7 +251,7 @@
       (transport_seq_num_id_changed &&
        !webrtc::field_trial::IsEnabled("WebRTC-Audio-ForceNoTWCC"))) {
     if (!first_time) {
-      channel_proxy->ResetSenderCongestionControlObjects();
+      channel_send->ResetSenderCongestionControlObjects();
     }
 
     RtcpBandwidthObserver* bandwidth_observer = nullptr;
@@ -325,7 +259,7 @@
         new_ids.transport_sequence_number != 0 &&
         !webrtc::field_trial::IsEnabled("WebRTC-Audio-ForceNoTWCC");
     if (has_transport_sequence_number) {
-      channel_proxy->EnableSendTransportSequenceNumber(
+      channel_send->EnableSendTransportSequenceNumber(
           new_ids.transport_sequence_number);
       // Probing in application limited region is only used in combination with
       // send side congestion control, wich depends on feedback packets which
@@ -336,7 +270,7 @@
       }
     }
     if (stream->rtp_transport_) {
-      channel_proxy->RegisterSenderCongestionControlObjects(
+      channel_send->RegisterSenderCongestionControlObjects(
           stream->rtp_transport_, bandwidth_observer);
     }
   }
@@ -344,7 +278,7 @@
   if ((first_time || new_ids.mid != old_ids.mid ||
        new_config.rtp.mid != old_config.rtp.mid) &&
       new_ids.mid != 0 && !new_config.rtp.mid.empty()) {
-    channel_proxy->SetMid(new_config.rtp.mid, new_ids.mid);
+    channel_send->SetMid(new_config.rtp.mid, new_ids.mid);
   }
 
   if (!ReconfigureSendCodec(stream, new_config)) {
@@ -380,7 +314,7 @@
   } else {
     rtp_rtcp_module_->SetAsPartOfAllocation(false);
   }
-  channel_proxy_->StartSend();
+  channel_send_->StartSend();
   sending_ = true;
   audio_state()->AddSendingStream(this, encoder_sample_rate_hz_,
                                   encoder_num_channels_);
@@ -393,14 +327,14 @@
   }
 
   RemoveBitrateObserver();
-  channel_proxy_->StopSend();
+  channel_send_->StopSend();
   sending_ = false;
   audio_state()->RemoveSendingStream(this);
 }
 
 void AudioSendStream::SendAudioData(std::unique_ptr<AudioFrame> audio_frame) {
   RTC_CHECK_RUNS_SERIALIZED(&audio_capture_race_checker_);
-  channel_proxy_->ProcessAndEncodeAudio(std::move(audio_frame));
+  channel_send_->ProcessAndEncodeAudio(std::move(audio_frame));
 }
 
 bool AudioSendStream::SendTelephoneEvent(int payload_type,
@@ -408,14 +342,14 @@
                                          int event,
                                          int duration_ms) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_proxy_->SetSendTelephoneEventPayloadType(payload_type,
-                                                          payload_frequency) &&
-         channel_proxy_->SendTelephoneEventOutband(event, duration_ms);
+  return channel_send_->SetSendTelephoneEventPayloadType(payload_type,
+                                                         payload_frequency) &&
+         channel_send_->SendTelephoneEventOutband(event, duration_ms);
 }
 
 void AudioSendStream::SetMuted(bool muted) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_proxy_->SetInputMute(muted);
+  channel_send_->SetInputMute(muted);
 }
 
 webrtc::AudioSendStream::Stats AudioSendStream::GetStats() const {
@@ -427,9 +361,9 @@
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   webrtc::AudioSendStream::Stats stats;
   stats.local_ssrc = config_.rtp.ssrc;
-  stats.target_bitrate_bps = channel_proxy_->GetBitrate();
+  stats.target_bitrate_bps = channel_send_->GetBitrate();
 
-  webrtc::CallSendStatistics call_stats = channel_proxy_->GetRTCPStatistics();
+  webrtc::CallSendStatistics call_stats = channel_send_->GetRTCPStatistics();
   stats.bytes_sent = call_stats.bytesSent;
   stats.packets_sent = call_stats.packetsSent;
   // RTT isn't known until a RTCP report is received. Until then, VoiceEngine
@@ -443,7 +377,7 @@
     stats.codec_payload_type = spec.payload_type;
 
     // Get data from the last remote RTCP report.
-    for (const auto& block : channel_proxy_->GetRemoteRTCPReportBlocks()) {
+    for (const auto& block : channel_send_->GetRemoteRTCPReportBlocks()) {
       // Lookup report for send ssrc only.
       if (block.source_SSRC == stats.local_ssrc) {
         stats.packets_lost = block.cumulative_num_packets_lost;
@@ -465,7 +399,7 @@
   stats.total_input_duration = input_stats.total_duration;
 
   stats.typing_noise_detected = audio_state()->typing_noise_detected();
-  stats.ana_statistics = channel_proxy_->GetANAStatistics();
+  stats.ana_statistics = channel_send_->GetANAStatistics();
   RTC_DCHECK(audio_state_->audio_processing());
   stats.apm_statistics =
       audio_state_->audio_processing()->GetStatistics(has_remote_tracks);
@@ -482,25 +416,24 @@
   // calls on the worker thread. We should move towards always using a network
   // thread. Then this check can be enabled.
   // RTC_DCHECK(!worker_thread_checker_.CalledOnValidThread());
-  return channel_proxy_->ReceivedRTCPPacket(packet, length);
+  return channel_send_->ReceivedRTCPPacket(packet, length);
 }
 
 uint32_t AudioSendStream::OnBitrateUpdated(BitrateAllocationUpdate update) {
   // A send stream may be allocated a bitrate of zero if the allocator decides
   // to disable it. For now we ignore this decision and keep sending on min
   // bitrate.
-  if (update.bitrate_bps == 0) {
-    update.bitrate_bps = config_.min_bitrate_bps;
+  if (update.target_bitrate.IsZero()) {
+    update.target_bitrate = DataRate::bps(config_.min_bitrate_bps);
   }
-  RTC_DCHECK_GE(update.bitrate_bps,
-                static_cast<uint32_t>(config_.min_bitrate_bps));
+  RTC_DCHECK_GE(update.target_bitrate.bps<int>(), config_.min_bitrate_bps);
   // The bitrate allocator might allocate an higher than max configured bitrate
   // if there is room, to allow for, as example, extra FEC. Ignore that for now.
-  const uint32_t max_bitrate_bps = config_.max_bitrate_bps;
-  if (update.bitrate_bps > max_bitrate_bps)
-    update.bitrate_bps = max_bitrate_bps;
+  const DataRate max_bitrate = DataRate::bps(config_.max_bitrate_bps);
+  if (update.target_bitrate > max_bitrate)
+    update.target_bitrate = max_bitrate;
 
-  channel_proxy_->SetBitrate(update.bitrate_bps, update.bwe_period_ms);
+  channel_send_->OnBitrateAllocation(update);
 
   // The amount of audio protection is not exposed by the encoder, hence
   // always returning 0.
@@ -534,25 +467,24 @@
   // the previously sent value is no longer relevant. This will be taken care
   // of with some refactoring which is now being done.
   if (plr) {
-    channel_proxy_->OnTwccBasedUplinkPacketLossRate(*plr);
+    channel_send_->OnTwccBasedUplinkPacketLossRate(*plr);
   }
   if (rplr) {
-    channel_proxy_->OnRecoverableUplinkPacketLossRate(*rplr);
+    channel_send_->OnRecoverableUplinkPacketLossRate(*rplr);
   }
 }
 
 void AudioSendStream::SetTransportOverhead(int transport_overhead_per_packet) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_proxy_->SetTransportOverhead(transport_overhead_per_packet);
+  channel_send_->SetTransportOverhead(transport_overhead_per_packet);
 }
 
 RtpState AudioSendStream::GetRtpState() const {
   return rtp_rtcp_module_->GetRtpState();
 }
 
-const voe::ChannelSendProxy& AudioSendStream::GetChannelProxy() const {
-  RTC_DCHECK(channel_proxy_.get());
-  return *channel_proxy_.get();
+const voe::ChannelSendInterface* AudioSendStream::GetChannel() const {
+  return channel_send_.get();
 }
 
 internal::AudioState* AudioSendStream::audio_state() {
@@ -637,8 +569,8 @@
 
   stream->StoreEncoderProperties(encoder->SampleRateHz(),
                                  encoder->NumChannels());
-  stream->channel_proxy_->SetEncoder(new_config.send_codec_spec->payload_type,
-                                     std::move(encoder));
+  stream->channel_send_->SetEncoder(new_config.send_codec_spec->payload_type,
+                                    std::move(encoder));
   return true;
 }
 
@@ -684,7 +616,7 @@
   if (!do_not_update_target_bitrate && new_target_bitrate_bps &&
       new_target_bitrate_bps !=
           old_config.send_codec_spec->target_bitrate_bps) {
-    CallEncoder(stream->channel_proxy_, [&](AudioEncoder* encoder) {
+    CallEncoder(stream->channel_send_, [&](AudioEncoder* encoder) {
       encoder->OnReceivedTargetAudioBitrate(*new_target_bitrate_bps);
     });
   }
@@ -702,7 +634,7 @@
     return;
   }
   if (new_config.audio_network_adaptor_config) {
-    CallEncoder(stream->channel_proxy_, [&](AudioEncoder* encoder) {
+    CallEncoder(stream->channel_send_, [&](AudioEncoder* encoder) {
       if (encoder->EnableAudioNetworkAdaptor(
               *new_config.audio_network_adaptor_config, stream->event_log_)) {
         RTC_DLOG(LS_INFO) << "Audio network adaptor enabled on SSRC "
@@ -712,7 +644,7 @@
       }
     });
   } else {
-    CallEncoder(stream->channel_proxy_, [&](AudioEncoder* encoder) {
+    CallEncoder(stream->channel_send_, [&](AudioEncoder* encoder) {
       encoder->DisableAudioNetworkAdaptor();
     });
     RTC_DLOG(LS_INFO) << "Audio network adaptor disabled on SSRC "
@@ -736,7 +668,7 @@
   }
 
   // Wrap or unwrap the encoder in an AudioEncoderCNG.
-  stream->channel_proxy_->ModifyEncoder(
+  stream->channel_send_->ModifyEncoder(
       [&](std::unique_ptr<AudioEncoder>* encoder_ptr) {
         std::unique_ptr<AudioEncoder> old_encoder(std::move(*encoder_ptr));
         auto sub_encoders = old_encoder->ReclaimContainedEncoders();
diff --git a/audio/audio_send_stream.h b/audio/audio_send_stream.h
index c86a9dc..bf94901 100644
--- a/audio/audio_send_stream.h
+++ b/audio/audio_send_stream.h
@@ -14,7 +14,7 @@
 #include <memory>
 #include <vector>
 
-#include "audio/time_interval.h"
+#include "audio/channel_send.h"
 #include "audio/transport_feedback_packet_loss_tracker.h"
 #include "call/audio_send_stream.h"
 #include "call/audio_state.h"
@@ -30,10 +30,6 @@
 class RtcpRttStats;
 class RtpTransportControllerSendInterface;
 
-namespace voe {
-class ChannelSendProxy;
-}  // namespace voe
-
 namespace internal {
 class AudioState;
 
@@ -49,9 +45,8 @@
                   BitrateAllocatorInterface* bitrate_allocator,
                   RtcEventLog* event_log,
                   RtcpRttStats* rtcp_rtt_stats,
-                  const absl::optional<RtpState>& suspended_rtp_state,
-                  TimeInterval* overall_call_lifetime);
-  // For unit tests, which need to supply a mock channel proxy.
+                  const absl::optional<RtpState>& suspended_rtp_state);
+  // For unit tests, which need to supply a mock ChannelSend.
   AudioSendStream(const webrtc::AudioSendStream::Config& config,
                   const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
                   rtc::TaskQueue* worker_queue,
@@ -60,8 +55,7 @@
                   RtcEventLog* event_log,
                   RtcpRttStats* rtcp_rtt_stats,
                   const absl::optional<RtpState>& suspended_rtp_state,
-                  TimeInterval* overall_call_lifetime,
-                  std::unique_ptr<voe::ChannelSendProxy> channel_proxy);
+                  std::unique_ptr<voe::ChannelSendInterface> channel_send);
   ~AudioSendStream() override;
 
   // webrtc::AudioSendStream implementation.
@@ -93,7 +87,7 @@
   void SetTransportOverhead(int transport_overhead_per_packet);
 
   RtpState GetRtpState() const;
-  const voe::ChannelSendProxy& GetChannelProxy() const;
+  const voe::ChannelSendInterface* GetChannel() const;
 
  private:
   class TimedTransport;
@@ -130,7 +124,7 @@
   rtc::TaskQueue* worker_queue_;
   webrtc::AudioSendStream::Config config_;
   rtc::scoped_refptr<webrtc::AudioState> audio_state_;
-  std::unique_ptr<voe::ChannelSendProxy> channel_proxy_;
+  const std::unique_ptr<voe::ChannelSendInterface> channel_send_;
   RtcEventLog* const event_log_;
 
   int encoder_sample_rate_hz_ = 0;
@@ -147,10 +141,6 @@
   RtpRtcp* rtp_rtcp_module_;
   absl::optional<RtpState> const suspended_rtp_state_;
 
-  std::unique_ptr<TimedTransport> timed_send_transport_adapter_;
-  TimeInterval active_lifetime_;
-  TimeInterval* overall_call_lifetime_ = nullptr;
-
   // RFC 5285: Each distinct extension MUST have a unique ID. The value 0 is
   // reserved for padding and MUST NOT be used as a local identifier.
   // So it should be safe to use 0 here to indicate "not configured".
diff --git a/audio/audio_send_stream_unittest.cc b/audio/audio_send_stream_unittest.cc
index 6a92329..e400ada 100644
--- a/audio/audio_send_stream_unittest.cc
+++ b/audio/audio_send_stream_unittest.cc
@@ -14,7 +14,6 @@
 
 #include "absl/memory/memory.h"
 #include "api/test/mock_frame_encryptor.h"
-#include "api/units/time_delta.h"
 #include "audio/audio_send_stream.h"
 #include "audio/audio_state.h"
 #include "audio/conversion.h"
@@ -28,12 +27,10 @@
 #include "modules/rtp_rtcp/mocks/mock_rtcp_bandwidth_observer.h"
 #include "modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h"
 #include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
-#include "rtc_base/fakeclock.h"
 #include "rtc_base/task_queue.h"
 #include "test/gtest.h"
 #include "test/mock_audio_encoder.h"
 #include "test/mock_audio_encoder_factory.h"
-#include "test/mock_transport.h"
 
 namespace webrtc {
 namespace test {
@@ -42,6 +39,7 @@
 using testing::_;
 using testing::Eq;
 using testing::Ne;
+using testing::Field;
 using testing::Invoke;
 using testing::Return;
 using testing::StrEq;
@@ -143,7 +141,7 @@
         new rtc::RefCountedObject<MockAudioDeviceModule>();
     audio_state_ = AudioState::Create(config);
 
-    SetupDefaultChannelProxy(audio_bwe_enabled);
+    SetupDefaultChannelSend(audio_bwe_enabled);
     SetupMockForSetupSendCodec(expect_set_encoder_call);
 
     // Use ISAC as default codec so as to prevent unnecessary |channel_proxy_|
@@ -151,7 +149,6 @@
     stream_config_.send_codec_spec =
         AudioSendStream::Config::SendCodecSpec(kIsacPayloadType, kIsacFormat);
     stream_config_.rtp.ssrc = kSsrc;
-    stream_config_.rtp.nack.rtp_history_ms = 200;
     stream_config_.rtp.c_name = kCName;
     stream_config_.rtp.extensions.push_back(
         RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId));
@@ -168,8 +165,7 @@
         new internal::AudioSendStream(
             stream_config_, audio_state_, &worker_queue_, &rtp_transport_,
             &bitrate_allocator_, &event_log_, &rtcp_rtt_stats_, absl::nullopt,
-            &active_lifetime_,
-            std::unique_ptr<voe::ChannelSendProxy>(channel_proxy_)));
+            std::unique_ptr<voe::ChannelSendInterface>(channel_send_)));
   }
 
   AudioSendStream::Config& config() { return stream_config_; }
@@ -177,9 +173,8 @@
     return *static_cast<MockAudioEncoderFactory*>(
         stream_config_.encoder_factory.get());
   }
-  MockChannelSendProxy* channel_proxy() { return channel_proxy_; }
+  MockChannelSend* channel_send() { return channel_send_; }
   RtpTransportControllerSendInterface* transport() { return &rtp_transport_; }
-  TimeInterval* active_lifetime() { return &active_lifetime_; }
 
   static void AddBweToConfig(AudioSendStream::Config* config) {
     config->rtp.extensions.push_back(RtpExtension(
@@ -187,48 +182,40 @@
     config->send_codec_spec->transport_cc_enabled = true;
   }
 
-  void SetupDefaultChannelProxy(bool audio_bwe_enabled) {
-    EXPECT_TRUE(channel_proxy_ == nullptr);
-    channel_proxy_ = new testing::StrictMock<MockChannelSendProxy>();
-    EXPECT_CALL(*channel_proxy_, GetRtpRtcp()).WillRepeatedly(Invoke([this]() {
+  void SetupDefaultChannelSend(bool audio_bwe_enabled) {
+    EXPECT_TRUE(channel_send_ == nullptr);
+    channel_send_ = new testing::StrictMock<MockChannelSend>();
+    EXPECT_CALL(*channel_send_, GetRtpRtcp()).WillRepeatedly(Invoke([this]() {
       return &this->rtp_rtcp_;
     }));
-    EXPECT_CALL(*channel_proxy_, SetRTCPStatus(true)).Times(1);
-    EXPECT_CALL(*channel_proxy_, SetLocalSSRC(kSsrc)).Times(1);
-    EXPECT_CALL(*channel_proxy_, SetRTCP_CNAME(StrEq(kCName))).Times(1);
-    EXPECT_CALL(*channel_proxy_, SetNACKStatus(true, 10)).Times(1);
-    EXPECT_CALL(*channel_proxy_, SetFrameEncryptor(_)).Times(1);
-    EXPECT_CALL(*channel_proxy_, SetExtmapAllowMixed(false)).Times(1);
-    EXPECT_CALL(*channel_proxy_,
+    EXPECT_CALL(*channel_send_, SetLocalSSRC(kSsrc)).Times(1);
+    EXPECT_CALL(*channel_send_, SetRTCP_CNAME(StrEq(kCName))).Times(1);
+    EXPECT_CALL(*channel_send_, SetFrameEncryptor(_)).Times(1);
+    EXPECT_CALL(*channel_send_, SetExtmapAllowMixed(false)).Times(1);
+    EXPECT_CALL(*channel_send_,
                 SetSendAudioLevelIndicationStatus(true, kAudioLevelId))
         .Times(1);
     EXPECT_CALL(rtp_transport_, GetBandwidthObserver())
         .WillRepeatedly(Return(&bandwidth_observer_));
     if (audio_bwe_enabled) {
-      EXPECT_CALL(*channel_proxy_,
+      EXPECT_CALL(*channel_send_,
                   EnableSendTransportSequenceNumber(kTransportSequenceNumberId))
           .Times(1);
-      EXPECT_CALL(*channel_proxy_,
+      EXPECT_CALL(*channel_send_,
                   RegisterSenderCongestionControlObjects(
                       &rtp_transport_, Eq(&bandwidth_observer_)))
           .Times(1);
     } else {
-      EXPECT_CALL(*channel_proxy_, RegisterSenderCongestionControlObjects(
-                                       &rtp_transport_, Eq(nullptr)))
+      EXPECT_CALL(*channel_send_, RegisterSenderCongestionControlObjects(
+                                      &rtp_transport_, Eq(nullptr)))
           .Times(1);
     }
-    EXPECT_CALL(*channel_proxy_, ResetSenderCongestionControlObjects())
-        .Times(1);
-    {
-      ::testing::InSequence unregister_on_destruction;
-      EXPECT_CALL(*channel_proxy_, RegisterTransport(_)).Times(1);
-      EXPECT_CALL(*channel_proxy_, RegisterTransport(nullptr)).Times(1);
-    }
+    EXPECT_CALL(*channel_send_, ResetSenderCongestionControlObjects()).Times(1);
   }
 
   void SetupMockForSetupSendCodec(bool expect_set_encoder_call) {
     if (expect_set_encoder_call) {
-      EXPECT_CALL(*channel_proxy_, SetEncoderForMock(_, _))
+      EXPECT_CALL(*channel_send_, SetEncoderForMock(_, _))
           .WillOnce(Invoke(
               [this](int payload_type, std::unique_ptr<AudioEncoder>* encoder) {
                 this->audio_encoder_ = std::move(*encoder);
@@ -239,7 +226,7 @@
 
   void SetupMockForModifyEncoder() {
     // Let ModifyEncoder to invoke mock audio encoder.
-    EXPECT_CALL(*channel_proxy_, ModifyEncoder(_))
+    EXPECT_CALL(*channel_send_, ModifyEncoder(_))
         .WillRepeatedly(Invoke(
             [this](rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)>
                        modifier) {
@@ -249,13 +236,13 @@
   }
 
   void SetupMockForSendTelephoneEvent() {
-    EXPECT_TRUE(channel_proxy_);
-    EXPECT_CALL(*channel_proxy_, SetSendTelephoneEventPayloadType(
-                                     kTelephoneEventPayloadType,
-                                     kTelephoneEventPayloadFrequency))
+    EXPECT_TRUE(channel_send_);
+    EXPECT_CALL(*channel_send_, SetSendTelephoneEventPayloadType(
+                                    kTelephoneEventPayloadType,
+                                    kTelephoneEventPayloadFrequency))
         .WillOnce(Return(true));
     EXPECT_CALL(
-        *channel_proxy_,
+        *channel_send_,
         SendTelephoneEventOutband(kTelephoneEventCode, kTelephoneEventDuration))
         .WillOnce(Return(true));
   }
@@ -273,14 +260,14 @@
     block.fraction_lost = 0;
     report_blocks.push_back(block);  // Duplicate SSRC, bad fraction_lost.
 
-    EXPECT_TRUE(channel_proxy_);
-    EXPECT_CALL(*channel_proxy_, GetRTCPStatistics())
+    EXPECT_TRUE(channel_send_);
+    EXPECT_CALL(*channel_send_, GetRTCPStatistics())
         .WillRepeatedly(Return(kCallStats));
-    EXPECT_CALL(*channel_proxy_, GetRemoteRTCPReportBlocks())
+    EXPECT_CALL(*channel_send_, GetRemoteRTCPReportBlocks())
         .WillRepeatedly(Return(report_blocks));
-    EXPECT_CALL(*channel_proxy_, GetANAStatistics())
+    EXPECT_CALL(*channel_send_, GetANAStatistics())
         .WillRepeatedly(Return(ANAStats()));
-    EXPECT_CALL(*channel_proxy_, GetBitrate()).WillRepeatedly(Return(0));
+    EXPECT_CALL(*channel_send_, GetBitrate()).WillRepeatedly(Return(0));
 
     audio_processing_stats_.echo_return_loss = kEchoReturnLoss;
     audio_processing_stats_.echo_return_loss_enhancement =
@@ -300,10 +287,9 @@
  private:
   rtc::scoped_refptr<AudioState> audio_state_;
   AudioSendStream::Config stream_config_;
-  testing::StrictMock<MockChannelSendProxy>* channel_proxy_ = nullptr;
+  testing::StrictMock<MockChannelSend>* channel_send_ = nullptr;
   rtc::scoped_refptr<MockAudioProcessing> audio_processing_;
   AudioProcessingStats audio_processing_stats_;
-  TimeInterval active_lifetime_;
   testing::StrictMock<MockRtcpBandwidthObserver> bandwidth_observer_;
   testing::NiceMock<MockRtcEventLog> event_log_;
   testing::NiceMock<MockRtpTransportControllerSend> rtp_transport_;
@@ -334,11 +320,12 @@
   config.rtp.extmap_allow_mixed = true;
   config.rtp.extensions.push_back(
       RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId));
+  config.rtcp_report_interval_ms = 2500;
   EXPECT_EQ(
       "{rtp: {ssrc: 1234, extmap-allow-mixed: true, extensions: [{uri: "
-      "urn:ietf:params:rtp-hdrext:ssrc-audio-level, id: 2}], nack: "
-      "{rtp_history_ms: 0}, c_name: foo_name}, send_transport: null, "
-      "media_transport: null, "
+      "urn:ietf:params:rtp-hdrext:ssrc-audio-level, id: 2}], "
+      "c_name: foo_name}, rtcp_report_interval_ms: 2500, "
+      "send_transport: null, media_transport: null, "
       "min_bitrate_bps: 12000, max_bitrate_bps: 34000, "
       "send_codec_spec: {nack_enabled: true, transport_cc_enabled: false, "
       "cng_payload_type: 42, payload_type: 103, "
@@ -364,7 +351,7 @@
 TEST(AudioSendStreamTest, SetMuted) {
   ConfigHelper helper(false, true);
   auto send_stream = helper.CreateAudioSendStream();
-  EXPECT_CALL(*helper.channel_proxy(), SetInputMute(true));
+  EXPECT_CALL(*helper.channel_send(), SetInputMute(true));
   send_stream->SetMuted(true);
 }
 
@@ -454,7 +441,7 @@
   helper.config().send_codec_spec->cng_payload_type = 105;
   using ::testing::Invoke;
   std::unique_ptr<AudioEncoder> stolen_encoder;
-  EXPECT_CALL(*helper.channel_proxy(), SetEncoderForMock(_, _))
+  EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
       .WillOnce(
           Invoke([&stolen_encoder](int payload_type,
                                    std::unique_ptr<AudioEncoder>* encoder) {
@@ -474,25 +461,30 @@
 TEST(AudioSendStreamTest, DoesNotPassHigherBitrateThanMaxBitrate) {
   ConfigHelper helper(false, true);
   auto send_stream = helper.CreateAudioSendStream();
-  EXPECT_CALL(*helper.channel_proxy(),
-              SetBitrate(helper.config().max_bitrate_bps, _));
+  EXPECT_CALL(*helper.channel_send(),
+              OnBitrateAllocation(
+                  Field(&BitrateAllocationUpdate::target_bitrate,
+                        Eq(DataRate::bps(helper.config().max_bitrate_bps)))));
   BitrateAllocationUpdate update;
-  update.bitrate_bps = helper.config().max_bitrate_bps + 5000;
-  update.fraction_loss = 0;
-  update.rtt = 50;
-  update.bwe_period_ms = 6000;
+  update.target_bitrate = DataRate::bps(helper.config().max_bitrate_bps + 5000);
+  update.packet_loss_ratio = 0;
+  update.round_trip_time = TimeDelta::ms(50);
+  update.bwe_period = TimeDelta::ms(6000);
   send_stream->OnBitrateUpdated(update);
 }
 
 TEST(AudioSendStreamTest, ProbingIntervalOnBitrateUpdated) {
   ConfigHelper helper(false, true);
   auto send_stream = helper.CreateAudioSendStream();
-  EXPECT_CALL(*helper.channel_proxy(), SetBitrate(_, 5000));
+
+  EXPECT_CALL(*helper.channel_send(),
+              OnBitrateAllocation(Field(&BitrateAllocationUpdate::bwe_period,
+                                        Eq(TimeDelta::ms(5000)))));
   BitrateAllocationUpdate update;
-  update.bitrate_bps = helper.config().max_bitrate_bps + 5000;
-  update.fraction_loss = 0;
-  update.rtt = 50;
-  update.bwe_period_ms = 5000;
+  update.target_bitrate = DataRate::bps(helper.config().max_bitrate_bps + 5000);
+  update.packet_loss_ratio = 0;
+  update.round_trip_time = TimeDelta::ms(50);
+  update.bwe_period = TimeDelta::ms(5000);
   send_stream->OnBitrateUpdated(update);
 }
 
@@ -504,7 +496,7 @@
   // to be correct, it's instead set-up manually here. Otherwise a simple change
   // to ConfigHelper (say to WillRepeatedly) would silently make this test
   // useless.
-  EXPECT_CALL(*helper.channel_proxy(), SetEncoderForMock(_, _))
+  EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
       .WillOnce(Return(true));
 
   helper.config().send_codec_spec =
@@ -519,15 +511,15 @@
   auto send_stream = helper.CreateAudioSendStream();
   auto new_config = helper.config();
   ConfigHelper::AddBweToConfig(&new_config);
-  EXPECT_CALL(*helper.channel_proxy(),
+  EXPECT_CALL(*helper.channel_send(),
               EnableSendTransportSequenceNumber(kTransportSequenceNumberId))
       .Times(1);
   {
     ::testing::InSequence seq;
-    EXPECT_CALL(*helper.channel_proxy(), ResetSenderCongestionControlObjects())
+    EXPECT_CALL(*helper.channel_send(), ResetSenderCongestionControlObjects())
         .Times(1);
-    EXPECT_CALL(*helper.channel_proxy(), RegisterSenderCongestionControlObjects(
-                                             helper.transport(), Ne(nullptr)))
+    EXPECT_CALL(*helper.channel_send(), RegisterSenderCongestionControlObjects(
+                                            helper.transport(), Ne(nullptr)))
         .Times(1);
   }
   send_stream->Reconfigure(new_config);
@@ -543,11 +535,11 @@
   rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_0(
       new rtc::RefCountedObject<MockFrameEncryptor>());
   new_config.frame_encryptor = mock_frame_encryptor_0;
-  EXPECT_CALL(*helper.channel_proxy(), SetFrameEncryptor(Ne(nullptr))).Times(1);
+  EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr))).Times(1);
   send_stream->Reconfigure(new_config);
 
   // Not updating the frame encryptor shouldn't force it to reconfigure.
-  EXPECT_CALL(*helper.channel_proxy(), SetFrameEncryptor(_)).Times(0);
+  EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(_)).Times(0);
   send_stream->Reconfigure(new_config);
 
   // Updating frame encryptor to a new object should force a call to the proxy.
@@ -555,36 +547,8 @@
       new rtc::RefCountedObject<MockFrameEncryptor>());
   new_config.frame_encryptor = mock_frame_encryptor_1;
   new_config.crypto_options.sframe.require_frame_encryption = true;
-  EXPECT_CALL(*helper.channel_proxy(), SetFrameEncryptor(Ne(nullptr))).Times(1);
+  EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr))).Times(1);
   send_stream->Reconfigure(new_config);
 }
-
-// Checks that AudioSendStream logs the times at which RTP packets are sent
-// through its interface.
-TEST(AudioSendStreamTest, UpdateLifetime) {
-  ConfigHelper helper(false, true);
-
-  MockTransport mock_transport;
-  helper.config().send_transport = &mock_transport;
-
-  Transport* registered_transport;
-  ON_CALL(*helper.channel_proxy(), RegisterTransport(_))
-      .WillByDefault(Invoke([&registered_transport](Transport* transport) {
-        registered_transport = transport;
-      }));
-
-  rtc::ScopedFakeClock fake_clock;
-  constexpr int64_t kTimeBetweenSendRtpCallsMs = 100;
-  {
-    auto send_stream = helper.CreateAudioSendStream();
-    EXPECT_CALL(mock_transport, SendRtp(_, _, _)).Times(2);
-    const PacketOptions options;
-    registered_transport->SendRtp(nullptr, 0, options);
-    fake_clock.AdvanceTime(TimeDelta::ms(kTimeBetweenSendRtpCallsMs));
-    registered_transport->SendRtp(nullptr, 0, options);
-  }
-  EXPECT_TRUE(!helper.active_lifetime()->Empty());
-  EXPECT_EQ(helper.active_lifetime()->Length(), kTimeBetweenSendRtpCallsMs);
-}
 }  // namespace test
 }  // namespace webrtc
diff --git a/audio/channel_receive.cc b/audio/channel_receive.cc
index 704ba79..483147f 100644
--- a/audio/channel_receive.cc
+++ b/audio/channel_receive.cc
@@ -18,14 +18,19 @@
 #include <vector>
 
 #include "absl/memory/memory.h"
+#include "audio/audio_level.h"
 #include "audio/channel_send.h"
 #include "audio/utility/audio_frame_operations.h"
 #include "logging/rtc_event_log/events/rtc_event_audio_playout.h"
 #include "logging/rtc_event_log/rtc_event_log.h"
 #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
 #include "modules/audio_device/include/audio_device.h"
 #include "modules/pacing/packet_router.h"
 #include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "modules/rtp_rtcp/source/contributing_sources.h"
 #include "modules/rtp_rtcp/source/rtp_header_extensions.h"
 #include "modules/rtp_rtcp/source/rtp_packet_received.h"
 #include "modules/utility/include/process_thread.h"
@@ -34,6 +39,8 @@
 #include "rtc_base/format_macros.h"
 #include "rtc_base/location.h"
 #include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/race_checker.h"
 #include "rtc_base/thread_checker.h"
 #include "rtc_base/timeutils.h"
 #include "system_wrappers/include/metrics.h"
@@ -83,7 +90,187 @@
   return webrtc_header;
 }
 
-}  // namespace
+class ChannelReceive : public ChannelReceiveInterface,
+                       public MediaTransportAudioSinkInterface {
+ public:
+  // Used for receive streams.
+  ChannelReceive(ProcessThread* module_process_thread,
+                 AudioDeviceModule* audio_device_module,
+                 MediaTransportInterface* media_transport,
+                 Transport* rtcp_send_transport,
+                 RtcEventLog* rtc_event_log,
+                 uint32_t remote_ssrc,
+                 size_t jitter_buffer_max_packets,
+                 bool jitter_buffer_fast_playout,
+                 int jitter_buffer_min_delay_ms,
+                 rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
+                 absl::optional<AudioCodecPairId> codec_pair_id,
+                 rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
+                 const webrtc::CryptoOptions& crypto_options);
+  ~ChannelReceive() override;
+
+  void SetSink(AudioSinkInterface* sink) override;
+
+  void SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs) override;
+
+  // API methods
+
+  void StartPlayout() override;
+  void StopPlayout() override;
+
+  // Codecs
+  bool GetRecCodec(CodecInst* codec) const override;
+
+  bool ReceivedRTCPPacket(const uint8_t* data, size_t length) override;
+
+  // RtpPacketSinkInterface.
+  void OnRtpPacket(const RtpPacketReceived& packet) override;
+
+  // Muting, Volume and Level.
+  void SetChannelOutputVolumeScaling(float scaling) override;
+  int GetSpeechOutputLevelFullRange() const override;
+  // See description of "totalAudioEnergy" in the WebRTC stats spec:
+  // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
+  double GetTotalOutputEnergy() const override;
+  double GetTotalOutputDuration() const override;
+
+  // Stats.
+  NetworkStatistics GetNetworkStatistics() const override;
+  AudioDecodingCallStats GetDecodingCallStatistics() const override;
+
+  // Audio+Video Sync.
+  uint32_t GetDelayEstimate() const override;
+  void SetMinimumPlayoutDelay(int delayMs) override;
+  uint32_t GetPlayoutTimestamp() const override;
+
+  // Produces the transport-related timestamps; current_delay_ms is left unset.
+  absl::optional<Syncable::Info> GetSyncInfo() const override;
+
+  // RTP+RTCP
+  void SetLocalSSRC(unsigned int ssrc) override;
+
+  void RegisterReceiverCongestionControlObjects(
+      PacketRouter* packet_router) override;
+  void ResetReceiverCongestionControlObjects() override;
+
+  CallReceiveStatistics GetRTCPStatistics() const override;
+  void SetNACKStatus(bool enable, int maxNumberOfPackets) override;
+
+  AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
+      int sample_rate_hz,
+      AudioFrame* audio_frame) override;
+
+  int PreferredSampleRate() const override;
+
+  // Associate to a send channel.
+  // Used for obtaining RTT for a receive-only channel.
+  void SetAssociatedSendChannel(const ChannelSendInterface* channel) override;
+
+  std::vector<RtpSource> GetSources() const override;
+
+ private:
+  bool ReceivePacket(const uint8_t* packet,
+                     size_t packet_length,
+                     const RTPHeader& header);
+  int ResendPackets(const uint16_t* sequence_numbers, int length);
+  void UpdatePlayoutTimestamp(bool rtcp);
+
+  int GetRtpTimestampRateHz() const;
+  int64_t GetRTT() const;
+
+  // MediaTransportAudioSinkInterface override;
+  void OnData(uint64_t channel_id,
+              MediaTransportEncodedAudioFrame frame) override;
+
+  int32_t OnReceivedPayloadData(const uint8_t* payloadData,
+                                size_t payloadSize,
+                                const WebRtcRTPHeader* rtpHeader);
+
+  bool Playing() const {
+    rtc::CritScope lock(&playing_lock_);
+    return playing_;
+  }
+
+  // Thread checkers document and lock usage of some methods to specific threads
+  // we know about. The goal is to eventually split up voe::ChannelReceive into
+  // parts with single-threaded semantics, and thereby reduce the need for
+  // locks.
+  rtc::ThreadChecker worker_thread_checker_;
+  rtc::ThreadChecker module_process_thread_checker_;
+  // Methods accessed from audio and video threads are checked for sequential-
+  // only access. We don't necessarily own and control these threads, so thread
+  // checkers cannot be used. E.g. Chromium may transfer "ownership" from one
+  // audio thread to another, but access is still sequential.
+  rtc::RaceChecker audio_thread_race_checker_;
+  rtc::RaceChecker video_capture_thread_race_checker_;
+  rtc::CriticalSection _callbackCritSect;
+  rtc::CriticalSection volume_settings_critsect_;
+
+  rtc::CriticalSection playing_lock_;
+  bool playing_ RTC_GUARDED_BY(&playing_lock_) = false;
+
+  RtcEventLog* const event_log_;
+
+  // Indexed by payload type.
+  std::map<uint8_t, int> payload_type_frequencies_;
+
+  std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
+  std::unique_ptr<RtpRtcp> _rtpRtcpModule;
+  const uint32_t remote_ssrc_;
+
+  // Info for GetSources and GetSyncInfo is updated on network or worker thread,
+  // queried on the worker thread.
+  rtc::CriticalSection rtp_sources_lock_;
+  ContributingSources contributing_sources_ RTC_GUARDED_BY(&rtp_sources_lock_);
+  absl::optional<uint32_t> last_received_rtp_timestamp_
+      RTC_GUARDED_BY(&rtp_sources_lock_);
+  absl::optional<int64_t> last_received_rtp_system_time_ms_
+      RTC_GUARDED_BY(&rtp_sources_lock_);
+  absl::optional<uint8_t> last_received_rtp_audio_level_
+      RTC_GUARDED_BY(&rtp_sources_lock_);
+
+  std::unique_ptr<AudioCodingModule> audio_coding_;
+  AudioSinkInterface* audio_sink_ = nullptr;
+  AudioLevel _outputAudioLevel;
+
+  RemoteNtpTimeEstimator ntp_estimator_ RTC_GUARDED_BY(ts_stats_lock_);
+
+  // Timestamp of the audio pulled from NetEq.
+  absl::optional<uint32_t> jitter_buffer_playout_timestamp_;
+
+  rtc::CriticalSection video_sync_lock_;
+  uint32_t playout_timestamp_rtp_ RTC_GUARDED_BY(video_sync_lock_);
+  uint32_t playout_delay_ms_ RTC_GUARDED_BY(video_sync_lock_);
+
+  rtc::CriticalSection ts_stats_lock_;
+
+  std::unique_ptr<rtc::TimestampWrapAroundHandler> rtp_ts_wraparound_handler_;
+  // The rtp timestamp of the first played out audio frame.
+  int64_t capture_start_rtp_time_stamp_;
+  // The capture ntp time (in local timebase) of the first played out audio
+  // frame.
+  int64_t capture_start_ntp_time_ms_ RTC_GUARDED_BY(ts_stats_lock_);
+
+  // uses
+  ProcessThread* _moduleProcessThreadPtr;
+  AudioDeviceModule* _audioDeviceModulePtr;
+  float _outputGain RTC_GUARDED_BY(volume_settings_critsect_);
+
+  // An associated send channel.
+  rtc::CriticalSection assoc_send_channel_lock_;
+  const ChannelSendInterface* associated_send_channel_
+      RTC_GUARDED_BY(assoc_send_channel_lock_);
+
+  PacketRouter* packet_router_ = nullptr;
+
+  rtc::ThreadChecker construction_thread_;
+
+  MediaTransportInterface* const media_transport_;
+
+  // E2EE Audio Frame Decryption
+  rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor_;
+  webrtc::CryptoOptions crypto_options_;
+};
 
 int32_t ChannelReceive::OnReceivedPayloadData(
     const uint8_t* payloadData,
@@ -92,7 +279,7 @@
   // We should not be receiving any RTP packets if media_transport is set.
   RTC_CHECK(!media_transport_);
 
-  if (!channel_state_.Get().playing) {
+  if (!Playing()) {
     // Avoid inserting into NetEQ when we are not playing. Count the
     // packet as discarded.
     return 0;
@@ -123,7 +310,7 @@
                             MediaTransportEncodedAudioFrame frame) {
   RTC_CHECK(media_transport_);
 
-  if (!channel_state_.Get().playing) {
+  if (!Playing()) {
     // Avoid inserting into NetEQ when we are not playing. Count the
     // packet as discarded.
     return;
@@ -142,11 +329,11 @@
 AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo(
     int sample_rate_hz,
     AudioFrame* audio_frame) {
+  RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
   audio_frame->sample_rate_hz_ = sample_rate_hz;
 
-  unsigned int ssrc;
-  RTC_CHECK_EQ(GetRemoteSSRC(ssrc), 0);
-  event_log_->Log(absl::make_unique<RtcEventAudioPlayout>(ssrc));
+  event_log_->Log(absl::make_unique<RtcEventAudioPlayout>(remote_ssrc_));
+
   // Get 10ms raw PCM data from the ACM (mixer limits output frequency)
   bool muted;
   if (audio_coding_->PlayoutData10Ms(audio_frame->sample_rate_hz_, audio_frame,
@@ -248,6 +435,7 @@
 }
 
 int ChannelReceive::PreferredSampleRate() const {
+  RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
   // Return the bigger of playout and receive frequency in the ACM.
   return std::max(audio_coding_->ReceiveFrequency(),
                   audio_coding_->PlayoutFrequency());
@@ -262,6 +450,7 @@
     uint32_t remote_ssrc,
     size_t jitter_buffer_max_packets,
     bool jitter_buffer_fast_playout,
+    int jitter_buffer_min_delay_ms,
     rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
     absl::optional<AudioCodecPairId> codec_pair_id,
     rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
@@ -284,6 +473,9 @@
       media_transport_(media_transport),
       frame_decryptor_(frame_decryptor),
       crypto_options_(crypto_options) {
+  // TODO(nisse): Use _moduleProcessThreadPtr instead?
+  module_process_thread_checker_.DetachFromThread();
+
   RTC_DCHECK(module_process_thread);
   RTC_DCHECK(audio_device_module);
   AudioCodingModule::Config acm_config;
@@ -291,6 +483,7 @@
   acm_config.neteq_config.codec_pair_id = codec_pair_id;
   acm_config.neteq_config.max_packets_in_buffer = jitter_buffer_max_packets;
   acm_config.neteq_config.enable_fast_accelerate = jitter_buffer_fast_playout;
+  acm_config.neteq_config.min_delay_ms = jitter_buffer_min_delay_ms;
   acm_config.neteq_config.enable_muted_state = true;
   audio_coding_.reset(AudioCodingModule::Create(acm_config));
 
@@ -308,26 +501,9 @@
   _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
   _rtpRtcpModule->SetSendingMediaStatus(false);
   _rtpRtcpModule->SetRemoteSSRC(remote_ssrc_);
-  Init();
-}
 
-ChannelReceive::~ChannelReceive() {
-  Terminate();
-  RTC_DCHECK(!channel_state_.Get().playing);
-}
-
-void ChannelReceive::Init() {
-  channel_state_.Reset();
-
-  // --- Add modules to process thread (for periodic schedulation)
   _moduleProcessThreadPtr->RegisterModule(_rtpRtcpModule.get(), RTC_FROM_HERE);
 
-  // --- ACM initialization
-  int error = audio_coding_->InitializeReceiver();
-  RTC_DCHECK_EQ(0, error);
-
-  // --- RTP/RTCP module initialization
-
   // Ensure that RTCP is enabled by default for the created channel.
   // Note that, the module will keep generating RTCP until it is explicitly
   // disabled by the user.
@@ -341,63 +517,48 @@
   }
 }
 
-void ChannelReceive::Terminate() {
+ChannelReceive::~ChannelReceive() {
   RTC_DCHECK(construction_thread_.CalledOnValidThread());
 
   if (media_transport_) {
     media_transport_->SetReceiveAudioSink(nullptr);
   }
 
-  // Must be called on the same thread as Init().
-  rtp_receive_statistics_->RegisterRtcpStatisticsCallback(NULL);
-
   StopPlayout();
 
-  // The order to safely shutdown modules in a channel is:
-  // 1. De-register callbacks in modules
-  // 2. De-register modules in process thread
-  // 3. Destroy modules
   int error = audio_coding_->RegisterTransportCallback(NULL);
   RTC_DCHECK_EQ(0, error);
 
-  // De-register modules in process thread
   if (_moduleProcessThreadPtr)
     _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get());
-
-  // End of modules shutdown
 }
 
 void ChannelReceive::SetSink(AudioSinkInterface* sink) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   rtc::CritScope cs(&_callbackCritSect);
   audio_sink_ = sink;
 }
 
-int32_t ChannelReceive::StartPlayout() {
-  if (channel_state_.Get().playing) {
-    return 0;
-  }
-
-  channel_state_.SetPlaying(true);
-
-  return 0;
+void ChannelReceive::StartPlayout() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  rtc::CritScope lock(&playing_lock_);
+  playing_ = true;
 }
 
-int32_t ChannelReceive::StopPlayout() {
-  if (!channel_state_.Get().playing) {
-    return 0;
-  }
-
-  channel_state_.SetPlaying(false);
+void ChannelReceive::StopPlayout() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  rtc::CritScope lock(&playing_lock_);
+  playing_ = false;
   _outputAudioLevel.Clear();
-
-  return 0;
 }
 
-int32_t ChannelReceive::GetRecCodec(CodecInst& codec) {
-  return (audio_coding_->ReceiveCodec(&codec));
+bool ChannelReceive::GetRecCodec(CodecInst* codec) const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  return (audio_coding_->ReceiveCodec(codec) == 0);
 }
 
 std::vector<webrtc::RtpSource> ChannelReceive::GetSources() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   int64_t now_ms = rtc::TimeMillis();
   std::vector<RtpSource> sources;
   {
@@ -415,6 +576,7 @@
 
 void ChannelReceive::SetReceiveCodecs(
     const std::map<int, SdpAudioFormat>& codecs) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   for (const auto& kv : codecs) {
     RTC_DCHECK_GE(kv.second.clockrate_hz, 1000);
     payload_type_frequencies_[kv.first] = kv.second.clockrate_hz;
@@ -422,7 +584,7 @@
   audio_coding_->SetReceiveCodecs(codecs);
 }
 
-// TODO(nisse): Move receive logic up to AudioReceiveStream.
+// May be called on either worker thread or network thread.
 void ChannelReceive::OnRtpPacket(const RtpPacketReceived& packet) {
   int64_t now_ms = rtc::TimeMillis();
   uint8_t audio_level;
@@ -513,7 +675,9 @@
                                &webrtc_rtp_header);
 }
 
-int32_t ChannelReceive::ReceivedRTCPPacket(const uint8_t* data, size_t length) {
+// May be called on either worker thread or network thread.
+// TODO(nisse): Drop always-true return value.
+bool ChannelReceive::ReceivedRTCPPacket(const uint8_t* data, size_t length) {
   // Store playout timestamp for the received RTCP packet
   UpdatePlayoutTimestamp(true);
 
@@ -523,7 +687,7 @@
   int64_t rtt = GetRTT();
   if (rtt == 0) {
     // Waiting for valid RTT.
-    return 0;
+    return true;
   }
 
   int64_t nack_window_ms = rtt;
@@ -539,46 +703,45 @@
   if (0 != _rtpRtcpModule->RemoteNTP(&ntp_secs, &ntp_frac, NULL, NULL,
                                      &rtp_timestamp)) {
     // Waiting for RTCP.
-    return 0;
+    return true;
   }
 
   {
     rtc::CritScope lock(&ts_stats_lock_);
     ntp_estimator_.UpdateRtcpTimestamp(rtt, ntp_secs, ntp_frac, rtp_timestamp);
   }
-  return 0;
+  return true;
 }
 
 int ChannelReceive::GetSpeechOutputLevelFullRange() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   return _outputAudioLevel.LevelFullRange();
 }
 
 double ChannelReceive::GetTotalOutputEnergy() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   return _outputAudioLevel.TotalEnergy();
 }
 
 double ChannelReceive::GetTotalOutputDuration() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   return _outputAudioLevel.TotalDuration();
 }
 
 void ChannelReceive::SetChannelOutputVolumeScaling(float scaling) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   rtc::CritScope cs(&volume_settings_critsect_);
   _outputGain = scaling;
 }
 
-int ChannelReceive::SetLocalSSRC(unsigned int ssrc) {
+void ChannelReceive::SetLocalSSRC(uint32_t ssrc) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   _rtpRtcpModule->SetSSRC(ssrc);
-  return 0;
-}
-
-// TODO(nisse): Pass ssrc in return value instead.
-int ChannelReceive::GetRemoteSSRC(unsigned int& ssrc) {
-  ssrc = remote_ssrc_;
-  return 0;
 }
 
 void ChannelReceive::RegisterReceiverCongestionControlObjects(
     PacketRouter* packet_router) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   RTC_DCHECK(packet_router);
   RTC_DCHECK(!packet_router_);
   constexpr bool remb_candidate = false;
@@ -587,13 +750,16 @@
 }
 
 void ChannelReceive::ResetReceiverCongestionControlObjects() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   RTC_DCHECK(packet_router_);
   packet_router_->RemoveReceiveRtpModule(_rtpRtcpModule.get());
   packet_router_ = nullptr;
 }
 
-int ChannelReceive::GetRTPStatistics(CallReceiveStatistics& stats) {
+CallReceiveStatistics ChannelReceive::GetRTCPStatistics() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   // --- RtcpStatistics
+  CallReceiveStatistics stats;
 
   // The jitter statistics is updated for each received RTP packet and is
   // based on received packets.
@@ -630,14 +796,15 @@
     rtc::CritScope lock(&ts_stats_lock_);
     stats.capture_start_ntp_time_ms_ = capture_start_ntp_time_ms_;
   }
-  return 0;
+  return stats;
 }
 
-void ChannelReceive::SetNACKStatus(bool enable, int maxNumberOfPackets) {
+void ChannelReceive::SetNACKStatus(bool enable, int max_packets) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   // None of these functions can fail.
-  rtp_receive_statistics_->SetMaxReorderingThreshold(maxNumberOfPackets);
+  rtp_receive_statistics_->SetMaxReorderingThreshold(max_packets);
   if (enable)
-    audio_coding_->EnableNack(maxNumberOfPackets);
+    audio_coding_->EnableNack(max_packets);
   else
     audio_coding_->DisableNack();
 }
@@ -648,54 +815,61 @@
   return _rtpRtcpModule->SendNACK(sequence_numbers, length);
 }
 
-void ChannelReceive::SetAssociatedSendChannel(ChannelSend* channel) {
+void ChannelReceive::SetAssociatedSendChannel(
+    const ChannelSendInterface* channel) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   rtc::CritScope lock(&assoc_send_channel_lock_);
   associated_send_channel_ = channel;
 }
 
-int ChannelReceive::GetNetworkStatistics(NetworkStatistics& stats) {
-  return audio_coding_->GetNetworkStatistics(&stats);
+NetworkStatistics ChannelReceive::GetNetworkStatistics() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  NetworkStatistics stats;
+  int error = audio_coding_->GetNetworkStatistics(&stats);
+  RTC_DCHECK_EQ(0, error);
+  return stats;
 }
 
-void ChannelReceive::GetDecodingCallStatistics(
-    AudioDecodingCallStats* stats) const {
-  audio_coding_->GetDecodingCallStatistics(stats);
+AudioDecodingCallStats ChannelReceive::GetDecodingCallStatistics() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  AudioDecodingCallStats stats;
+  audio_coding_->GetDecodingCallStatistics(&stats);
+  return stats;
 }
 
 uint32_t ChannelReceive::GetDelayEstimate() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread() ||
+             module_process_thread_checker_.CalledOnValidThread());
   rtc::CritScope lock(&video_sync_lock_);
   return audio_coding_->FilteredCurrentDelayMs() + playout_delay_ms_;
 }
 
-int ChannelReceive::SetMinimumPlayoutDelay(int delayMs) {
-  if ((delayMs < kVoiceEngineMinMinPlayoutDelayMs) ||
-      (delayMs > kVoiceEngineMaxMinPlayoutDelayMs)) {
+void ChannelReceive::SetMinimumPlayoutDelay(int delay_ms) {
+  RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
+  // Limit to range accepted by both VoE and ACM, so we're at least getting as
+  // close as possible, instead of failing.
+  delay_ms = rtc::SafeClamp(delay_ms, 0, 10000);
+  if ((delay_ms < kVoiceEngineMinMinPlayoutDelayMs) ||
+      (delay_ms > kVoiceEngineMaxMinPlayoutDelayMs)) {
     RTC_DLOG(LS_ERROR) << "SetMinimumPlayoutDelay() invalid min delay";
-    return -1;
+    return;
   }
-  if (audio_coding_->SetMinimumPlayoutDelay(delayMs) != 0) {
+  if (audio_coding_->SetMinimumPlayoutDelay(delay_ms) != 0) {
     RTC_DLOG(LS_ERROR)
         << "SetMinimumPlayoutDelay() failed to set min playout delay";
-    return -1;
   }
-  return 0;
 }
 
-int ChannelReceive::GetPlayoutTimestamp(unsigned int& timestamp) {
-  uint32_t playout_timestamp_rtp = 0;
+uint32_t ChannelReceive::GetPlayoutTimestamp() const {
+  RTC_DCHECK_RUNS_SERIALIZED(&video_capture_thread_race_checker_);
   {
     rtc::CritScope lock(&video_sync_lock_);
-    playout_timestamp_rtp = playout_timestamp_rtp_;
+    return playout_timestamp_rtp_;
   }
-  if (playout_timestamp_rtp == 0) {
-    RTC_DLOG(LS_ERROR) << "GetPlayoutTimestamp() failed to retrieve timestamp";
-    return -1;
-  }
-  timestamp = playout_timestamp_rtp;
-  return 0;
 }
 
 absl::optional<Syncable::Info> ChannelReceive::GetSyncInfo() const {
+  RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
   Syncable::Info info;
   if (_rtpRtcpModule->RemoteNTP(&info.capture_time_ntp_secs,
                                 &info.capture_time_ntp_frac, nullptr, nullptr,
@@ -757,6 +931,14 @@
 }
 
 int64_t ChannelReceive::GetRTT() const {
+  if (media_transport_) {
+    auto target_rate = media_transport_->GetLatestTargetTransferRate();
+    if (target_rate.has_value()) {
+      return target_rate->network_estimate.round_trip_time.ms();
+    }
+
+    return 0;
+  }
   RtcpMode method = _rtpRtcpModule->RTCP();
   if (method == RtcpMode::kOff) {
     return 0;
@@ -788,5 +970,29 @@
   return rtt;
 }
 
+}  // namespace
+
+std::unique_ptr<ChannelReceiveInterface> CreateChannelReceive(
+    ProcessThread* module_process_thread,
+    AudioDeviceModule* audio_device_module,
+    MediaTransportInterface* media_transport,
+    Transport* rtcp_send_transport,
+    RtcEventLog* rtc_event_log,
+    uint32_t remote_ssrc,
+    size_t jitter_buffer_max_packets,
+    bool jitter_buffer_fast_playout,
+    int jitter_buffer_min_delay_ms,
+    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
+    absl::optional<AudioCodecPairId> codec_pair_id,
+    rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
+    const webrtc::CryptoOptions& crypto_options) {
+  return absl::make_unique<ChannelReceive>(
+      module_process_thread, audio_device_module, media_transport,
+      rtcp_send_transport, rtc_event_log, remote_ssrc,
+      jitter_buffer_max_packets, jitter_buffer_fast_playout,
+      jitter_buffer_min_delay_ms, decoder_factory, codec_pair_id,
+      frame_decryptor, crypto_options);
+}
+
 }  // namespace voe
 }  // namespace webrtc
diff --git a/audio/channel_receive.h b/audio/channel_receive.h
index 0c50962..9027623 100644
--- a/audio/channel_receive.h
+++ b/audio/channel_receive.h
@@ -17,24 +17,18 @@
 
 #include "absl/types/optional.h"
 #include "api/audio/audio_mixer.h"
+#include "api/audio_codecs/audio_decoder_factory.h"
 #include "api/call/audio_sink.h"
 #include "api/call/transport.h"
 #include "api/crypto/cryptooptions.h"
 #include "api/media_transport_interface.h"
 #include "api/rtpreceiverinterface.h"
-#include "audio/audio_level.h"
+#include "call/rtp_packet_sink_interface.h"
 #include "call/syncable.h"
-#include "common_types.h"  // NOLINT(build/include)
 #include "modules/audio_coding/include/audio_coding_module.h"
-#include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
-#include "modules/rtp_rtcp/include/rtp_header_parser.h"
-#include "modules/rtp_rtcp/include/rtp_rtcp.h"
-#include "modules/rtp_rtcp/source/contributing_sources.h"
-#include "rtc_base/criticalsection.h"
-#include "rtc_base/thread_checker.h"
 
 // TODO(solenberg, nisse): This file contains a few NOLINT marks, to silence
-// warnings about use of unsigned short, and non-const reference arguments.
+// warnings about use of unsigned short.
 // These need cleanup, in a separate cl.
 
 namespace rtc {
@@ -68,210 +62,85 @@
 
 namespace voe {
 
-class ChannelSend;
+class ChannelSendInterface;
 
-// Helper class to simplify locking scheme for members that are accessed from
-// multiple threads.
-// Example: a member can be set on thread T1 and read by an internal audio
-// thread T2. Accessing the member via this class ensures that we are
-// safe and also avoid TSan v2 warnings.
-class ChannelReceiveState {
+// Interface class needed for AudioReceiveStream tests that use a
+// MockChannelReceive.
+
+class ChannelReceiveInterface : public RtpPacketSinkInterface {
  public:
-  struct State {
-    bool playing = false;
-  };
+  virtual ~ChannelReceiveInterface() = default;
 
-  ChannelReceiveState() {}
-  virtual ~ChannelReceiveState() {}
+  virtual void SetSink(AudioSinkInterface* sink) = 0;
 
-  void Reset() {
-    rtc::CritScope lock(&lock_);
-    state_ = State();
-  }
+  virtual void SetReceiveCodecs(
+      const std::map<int, SdpAudioFormat>& codecs) = 0;
 
-  State Get() const {
-    rtc::CritScope lock(&lock_);
-    return state_;
-  }
+  virtual void StartPlayout() = 0;
+  virtual void StopPlayout() = 0;
 
-  void SetPlaying(bool enable) {
-    rtc::CritScope lock(&lock_);
-    state_.playing = enable;
-  }
+  virtual bool GetRecCodec(CodecInst* codec) const = 0;
 
- private:
-  rtc::CriticalSection lock_;
-  State state_;
-};
+  virtual bool ReceivedRTCPPacket(const uint8_t* data, size_t length) = 0;
 
-class ChannelReceive : public RtpData, public MediaTransportAudioSinkInterface {
- public:
-  // Used for receive streams.
-  ChannelReceive(ProcessThread* module_process_thread,
-                 AudioDeviceModule* audio_device_module,
-                 MediaTransportInterface* media_transport,
-                 Transport* rtcp_send_transport,
-                 RtcEventLog* rtc_event_log,
-                 uint32_t remote_ssrc,
-                 size_t jitter_buffer_max_packets,
-                 bool jitter_buffer_fast_playout,
-                 rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
-                 absl::optional<AudioCodecPairId> codec_pair_id,
-                 rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
-                 const webrtc::CryptoOptions& crypto_options);
-  virtual ~ChannelReceive();
-
-  void SetSink(AudioSinkInterface* sink);
-
-  void SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs);
-
-  // API methods
-
-  // VoEBase
-  int32_t StartPlayout();
-  int32_t StopPlayout();
-
-  // Codecs
-  int32_t GetRecCodec(CodecInst& codec);  // NOLINT
-
-  // TODO(nisse, solenberg): Delete when VoENetwork is deleted.
-  int32_t ReceivedRTCPPacket(const uint8_t* data, size_t length);
-  void OnRtpPacket(const RtpPacketReceived& packet);
-
-  // Muting, Volume and Level.
-  void SetChannelOutputVolumeScaling(float scaling);
-  int GetSpeechOutputLevelFullRange() const;
+  virtual void SetChannelOutputVolumeScaling(float scaling) = 0;
+  virtual int GetSpeechOutputLevelFullRange() const = 0;
   // See description of "totalAudioEnergy" in the WebRTC stats spec:
   // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
-  double GetTotalOutputEnergy() const;
-  double GetTotalOutputDuration() const;
+  virtual double GetTotalOutputEnergy() const = 0;
+  virtual double GetTotalOutputDuration() const = 0;
 
   // Stats.
-  int GetNetworkStatistics(NetworkStatistics& stats);  // NOLINT
-  void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const;
+  virtual NetworkStatistics GetNetworkStatistics() const = 0;
+  virtual AudioDecodingCallStats GetDecodingCallStatistics() const = 0;
 
   // Audio+Video Sync.
-  uint32_t GetDelayEstimate() const;
-  int SetMinimumPlayoutDelay(int delayMs);
-  int GetPlayoutTimestamp(unsigned int& timestamp);  // NOLINT
+  virtual uint32_t GetDelayEstimate() const = 0;
+  virtual void SetMinimumPlayoutDelay(int delay_ms) = 0;
+  virtual uint32_t GetPlayoutTimestamp() const = 0;
 
   // Produces the transport-related timestamps; current_delay_ms is left unset.
-  absl::optional<Syncable::Info> GetSyncInfo() const;
+  virtual absl::optional<Syncable::Info> GetSyncInfo() const = 0;
 
   // RTP+RTCP
-  int SetLocalSSRC(unsigned int ssrc);
+  virtual void SetLocalSSRC(uint32_t ssrc) = 0;
 
-  void RegisterReceiverCongestionControlObjects(PacketRouter* packet_router);
-  void ResetReceiverCongestionControlObjects();
+  virtual void RegisterReceiverCongestionControlObjects(
+      PacketRouter* packet_router) = 0;
+  virtual void ResetReceiverCongestionControlObjects() = 0;
 
-  int GetRTPStatistics(CallReceiveStatistics& stats);  // NOLINT
-  void SetNACKStatus(bool enable, int maxNumberOfPackets);
+  virtual CallReceiveStatistics GetRTCPStatistics() const = 0;
+  virtual void SetNACKStatus(bool enable, int max_packets) = 0;
 
-  // MediaTransportAudioSinkInterface override;
-  void OnData(uint64_t channel_id,
-              MediaTransportEncodedAudioFrame frame) override;
-
-  // From RtpData in the RTP/RTCP module
-  int32_t OnReceivedPayloadData(const uint8_t* payloadData,
-                                size_t payloadSize,
-                                const WebRtcRTPHeader* rtpHeader) override;
-
-  // From AudioMixer::Source.
-  AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
+  virtual AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
       int sample_rate_hz,
-      AudioFrame* audio_frame);
+      AudioFrame* audio_frame) = 0;
 
-  int PreferredSampleRate() const;
+  virtual int PreferredSampleRate() const = 0;
 
   // Associate to a send channel.
   // Used for obtaining RTT for a receive-only channel.
-  void SetAssociatedSendChannel(ChannelSend* channel);
+  virtual void SetAssociatedSendChannel(
+      const ChannelSendInterface* channel) = 0;
 
-  std::vector<RtpSource> GetSources() const;
-
- private:
-  void Init();
-  void Terminate();
-
-  int GetRemoteSSRC(unsigned int& ssrc);  // NOLINT
-
-  bool ReceivePacket(const uint8_t* packet,
-                     size_t packet_length,
-                     const RTPHeader& header);
-  int ResendPackets(const uint16_t* sequence_numbers, int length);
-  void UpdatePlayoutTimestamp(bool rtcp);
-
-  int GetRtpTimestampRateHz() const;
-  int64_t GetRTT() const;
-
-  rtc::CriticalSection _callbackCritSect;
-  rtc::CriticalSection volume_settings_critsect_;
-
-  ChannelReceiveState channel_state_;
-
-  RtcEventLog* const event_log_;
-
-  // Indexed by payload type.
-  std::map<uint8_t, int> payload_type_frequencies_;
-
-  std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
-  std::unique_ptr<RtpRtcp> _rtpRtcpModule;
-  const uint32_t remote_ssrc_;
-
-  // Info for GetSources and GetSyncInfo is updated on network or worker thread,
-  // queried on the worker thread.
-  rtc::CriticalSection rtp_sources_lock_;
-  ContributingSources contributing_sources_ RTC_GUARDED_BY(&rtp_sources_lock_);
-  absl::optional<uint32_t> last_received_rtp_timestamp_
-      RTC_GUARDED_BY(&rtp_sources_lock_);
-  absl::optional<int64_t> last_received_rtp_system_time_ms_
-      RTC_GUARDED_BY(&rtp_sources_lock_);
-  absl::optional<uint8_t> last_received_rtp_audio_level_
-      RTC_GUARDED_BY(&rtp_sources_lock_);
-
-  std::unique_ptr<AudioCodingModule> audio_coding_;
-  AudioSinkInterface* audio_sink_ = nullptr;
-  AudioLevel _outputAudioLevel;
-
-  RemoteNtpTimeEstimator ntp_estimator_ RTC_GUARDED_BY(ts_stats_lock_);
-
-  // Timestamp of the audio pulled from NetEq.
-  absl::optional<uint32_t> jitter_buffer_playout_timestamp_;
-
-  rtc::CriticalSection video_sync_lock_;
-  uint32_t playout_timestamp_rtp_ RTC_GUARDED_BY(video_sync_lock_);
-  uint32_t playout_delay_ms_ RTC_GUARDED_BY(video_sync_lock_);
-
-  rtc::CriticalSection ts_stats_lock_;
-
-  std::unique_ptr<rtc::TimestampWrapAroundHandler> rtp_ts_wraparound_handler_;
-  // The rtp timestamp of the first played out audio frame.
-  int64_t capture_start_rtp_time_stamp_;
-  // The capture ntp time (in local timebase) of the first played out audio
-  // frame.
-  int64_t capture_start_ntp_time_ms_ RTC_GUARDED_BY(ts_stats_lock_);
-
-  // uses
-  ProcessThread* _moduleProcessThreadPtr;
-  AudioDeviceModule* _audioDeviceModulePtr;
-  float _outputGain RTC_GUARDED_BY(volume_settings_critsect_);
-
-  // An associated send channel.
-  rtc::CriticalSection assoc_send_channel_lock_;
-  ChannelSend* associated_send_channel_
-      RTC_GUARDED_BY(assoc_send_channel_lock_);
-
-  PacketRouter* packet_router_ = nullptr;
-
-  rtc::ThreadChecker construction_thread_;
-
-  MediaTransportInterface* const media_transport_;
-
-  // E2EE Audio Frame Decryption
-  rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor_;
-  webrtc::CryptoOptions crypto_options_;
+  virtual std::vector<RtpSource> GetSources() const = 0;
 };
 
+std::unique_ptr<ChannelReceiveInterface> CreateChannelReceive(
+    ProcessThread* module_process_thread,
+    AudioDeviceModule* audio_device_module,
+    MediaTransportInterface* media_transport,
+    Transport* rtcp_send_transport,
+    RtcEventLog* rtc_event_log,
+    uint32_t remote_ssrc,
+    size_t jitter_buffer_max_packets,
+    bool jitter_buffer_fast_playout,
+    int jitter_buffer_min_delay_ms,
+    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
+    absl::optional<AudioCodecPairId> codec_pair_id,
+    rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
+    const webrtc::CryptoOptions& crypto_options);
+
 }  // namespace voe
 }  // namespace webrtc
 
diff --git a/audio/channel_receive_proxy.cc b/audio/channel_receive_proxy.cc
deleted file mode 100644
index 1dee640..0000000
--- a/audio/channel_receive_proxy.cc
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "audio/channel_receive_proxy.h"
-
-#include <utility>
-
-#include "api/call/audio_sink.h"
-#include "audio/channel_send_proxy.h"
-#include "rtc_base/checks.h"
-#include "rtc_base/logging.h"
-#include "rtc_base/numerics/safe_minmax.h"
-
-namespace webrtc {
-namespace voe {
-ChannelReceiveProxy::ChannelReceiveProxy() {}
-
-ChannelReceiveProxy::ChannelReceiveProxy(
-    std::unique_ptr<ChannelReceive> channel)
-    : channel_(std::move(channel)) {
-  RTC_DCHECK(channel_);
-  module_process_thread_checker_.DetachFromThread();
-}
-
-ChannelReceiveProxy::~ChannelReceiveProxy() {}
-
-void ChannelReceiveProxy::SetLocalSSRC(uint32_t ssrc) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  int error = channel_->SetLocalSSRC(ssrc);
-  RTC_DCHECK_EQ(0, error);
-}
-
-void ChannelReceiveProxy::SetNACKStatus(bool enable, int max_packets) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetNACKStatus(enable, max_packets);
-}
-
-CallReceiveStatistics ChannelReceiveProxy::GetRTCPStatistics() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  CallReceiveStatistics stats = {0};
-  int error = channel_->GetRTPStatistics(stats);
-  RTC_DCHECK_EQ(0, error);
-  return stats;
-}
-
-bool ChannelReceiveProxy::ReceivedRTCPPacket(const uint8_t* packet,
-                                             size_t length) {
-  // May be called on either worker thread or network thread.
-  return channel_->ReceivedRTCPPacket(packet, length) == 0;
-}
-
-void ChannelReceiveProxy::RegisterReceiverCongestionControlObjects(
-    PacketRouter* packet_router) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->RegisterReceiverCongestionControlObjects(packet_router);
-}
-
-void ChannelReceiveProxy::ResetReceiverCongestionControlObjects() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->ResetReceiverCongestionControlObjects();
-}
-
-NetworkStatistics ChannelReceiveProxy::GetNetworkStatistics() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  NetworkStatistics stats = {0};
-  int error = channel_->GetNetworkStatistics(stats);
-  RTC_DCHECK_EQ(0, error);
-  return stats;
-}
-
-AudioDecodingCallStats ChannelReceiveProxy::GetDecodingCallStatistics() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  AudioDecodingCallStats stats;
-  channel_->GetDecodingCallStatistics(&stats);
-  return stats;
-}
-
-int ChannelReceiveProxy::GetSpeechOutputLevelFullRange() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->GetSpeechOutputLevelFullRange();
-}
-
-double ChannelReceiveProxy::GetTotalOutputEnergy() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->GetTotalOutputEnergy();
-}
-
-double ChannelReceiveProxy::GetTotalOutputDuration() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->GetTotalOutputDuration();
-}
-
-uint32_t ChannelReceiveProxy::GetDelayEstimate() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread() ||
-             module_process_thread_checker_.CalledOnValidThread());
-  return channel_->GetDelayEstimate();
-}
-
-void ChannelReceiveProxy::SetReceiveCodecs(
-    const std::map<int, SdpAudioFormat>& codecs) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetReceiveCodecs(codecs);
-}
-
-void ChannelReceiveProxy::SetSink(AudioSinkInterface* sink) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetSink(sink);
-}
-
-void ChannelReceiveProxy::OnRtpPacket(const RtpPacketReceived& packet) {
-  // May be called on either worker thread or network thread.
-  channel_->OnRtpPacket(packet);
-}
-
-void ChannelReceiveProxy::SetChannelOutputVolumeScaling(float scaling) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetChannelOutputVolumeScaling(scaling);
-}
-
-AudioMixer::Source::AudioFrameInfo ChannelReceiveProxy::GetAudioFrameWithInfo(
-    int sample_rate_hz,
-    AudioFrame* audio_frame) {
-  RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
-  return channel_->GetAudioFrameWithInfo(sample_rate_hz, audio_frame);
-}
-
-int ChannelReceiveProxy::PreferredSampleRate() const {
-  RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
-  return channel_->PreferredSampleRate();
-}
-
-void ChannelReceiveProxy::AssociateSendChannel(
-    const ChannelSendProxy& send_channel_proxy) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetAssociatedSendChannel(send_channel_proxy.GetChannel());
-}
-
-void ChannelReceiveProxy::DisassociateSendChannel() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetAssociatedSendChannel(nullptr);
-}
-
-absl::optional<Syncable::Info> ChannelReceiveProxy::GetSyncInfo() const {
-  RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
-  return channel_->GetSyncInfo();
-}
-
-uint32_t ChannelReceiveProxy::GetPlayoutTimestamp() const {
-  RTC_DCHECK_RUNS_SERIALIZED(&video_capture_thread_race_checker_);
-  unsigned int timestamp = 0;
-  int error = channel_->GetPlayoutTimestamp(timestamp);
-  RTC_DCHECK(!error || timestamp == 0);
-  return timestamp;
-}
-
-void ChannelReceiveProxy::SetMinimumPlayoutDelay(int delay_ms) {
-  RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
-  // Limit to range accepted by both VoE and ACM, so we're at least getting as
-  // close as possible, instead of failing.
-  delay_ms = rtc::SafeClamp(delay_ms, 0, 10000);
-  int error = channel_->SetMinimumPlayoutDelay(delay_ms);
-  if (0 != error) {
-    RTC_LOG(LS_WARNING) << "Error setting minimum playout delay.";
-  }
-}
-
-bool ChannelReceiveProxy::GetRecCodec(CodecInst* codec_inst) const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->GetRecCodec(*codec_inst) == 0;
-}
-
-std::vector<RtpSource> ChannelReceiveProxy::GetSources() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->GetSources();
-}
-
-void ChannelReceiveProxy::StartPlayout() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  int error = channel_->StartPlayout();
-  RTC_DCHECK_EQ(0, error);
-}
-
-void ChannelReceiveProxy::StopPlayout() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  int error = channel_->StopPlayout();
-  RTC_DCHECK_EQ(0, error);
-}
-}  // namespace voe
-}  // namespace webrtc
diff --git a/audio/channel_receive_proxy.h b/audio/channel_receive_proxy.h
deleted file mode 100644
index 8ebacc3..0000000
--- a/audio/channel_receive_proxy.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef AUDIO_CHANNEL_RECEIVE_PROXY_H_
-#define AUDIO_CHANNEL_RECEIVE_PROXY_H_
-
-#include <map>
-#include <memory>
-#include <vector>
-
-#include "api/audio/audio_mixer.h"
-#include "api/rtpreceiverinterface.h"
-#include "audio/channel_receive.h"
-#include "call/rtp_packet_sink_interface.h"
-#include "rtc_base/constructormagic.h"
-#include "rtc_base/race_checker.h"
-#include "rtc_base/thread_checker.h"
-
-namespace webrtc {
-
-class AudioSinkInterface;
-class PacketRouter;
-class RtpPacketReceived;
-class Transport;
-
-namespace voe {
-
-class ChannelSendProxy;
-
-// This class provides the "view" of a voe::Channel that we need to implement
-// webrtc::AudioReceiveStream. It serves two purposes:
-//  1. Allow mocking just the interfaces used, instead of the entire
-//     voe::Channel class.
-//  2. Provide a refined interface for the stream classes, including assumptions
-//     on return values and input adaptation.
-class ChannelReceiveProxy : public RtpPacketSinkInterface {
- public:
-  ChannelReceiveProxy();
-  explicit ChannelReceiveProxy(std::unique_ptr<ChannelReceive> channel);
-  virtual ~ChannelReceiveProxy();
-
-  // Shared with ChannelSendProxy
-  virtual void SetLocalSSRC(uint32_t ssrc);
-  virtual void SetNACKStatus(bool enable, int max_packets);
-  virtual CallReceiveStatistics GetRTCPStatistics() const;
-  virtual bool ReceivedRTCPPacket(const uint8_t* packet, size_t length);
-
-  virtual void RegisterReceiverCongestionControlObjects(
-      PacketRouter* packet_router);
-  virtual void ResetReceiverCongestionControlObjects();
-  virtual NetworkStatistics GetNetworkStatistics() const;
-  virtual AudioDecodingCallStats GetDecodingCallStatistics() const;
-  virtual int GetSpeechOutputLevelFullRange() const;
-  // See description of "totalAudioEnergy" in the WebRTC stats spec:
-  // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
-  virtual double GetTotalOutputEnergy() const;
-  virtual double GetTotalOutputDuration() const;
-  virtual uint32_t GetDelayEstimate() const;
-  virtual void SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs);
-  virtual void SetSink(AudioSinkInterface* sink);
-
-  // Implements RtpPacketSinkInterface
-  void OnRtpPacket(const RtpPacketReceived& packet) override;
-
-  virtual void SetChannelOutputVolumeScaling(float scaling);
-  virtual AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
-      int sample_rate_hz,
-      AudioFrame* audio_frame);
-  virtual int PreferredSampleRate() const;
-  virtual void AssociateSendChannel(const ChannelSendProxy& send_channel_proxy);
-  virtual void DisassociateSendChannel();
-
-  // Produces the transport-related timestamps; current_delay_ms is left unset.
-  absl::optional<Syncable::Info> GetSyncInfo() const;
-  virtual uint32_t GetPlayoutTimestamp() const;
-  virtual void SetMinimumPlayoutDelay(int delay_ms);
-  virtual bool GetRecCodec(CodecInst* codec_inst) const;
-  virtual std::vector<webrtc::RtpSource> GetSources() const;
-  virtual void StartPlayout();
-  virtual void StopPlayout();
-
- private:
-  // Thread checkers document and lock usage of some methods on voe::Channel to
-  // specific threads we know about. The goal is to eventually split up
-  // voe::Channel into parts with single-threaded semantics, and thereby reduce
-  // the need for locks.
-  rtc::ThreadChecker worker_thread_checker_;
-  rtc::ThreadChecker module_process_thread_checker_;
-  // Methods accessed from audio and video threads are checked for sequential-
-  // only access. We don't necessarily own and control these threads, so thread
-  // checkers cannot be used. E.g. Chromium may transfer "ownership" from one
-  // audio thread to another, but access is still sequential.
-  rtc::RaceChecker audio_thread_race_checker_;
-  rtc::RaceChecker video_capture_thread_race_checker_;
-  std::unique_ptr<ChannelReceive> channel_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(ChannelReceiveProxy);
-};
-}  // namespace voe
-}  // namespace webrtc
-
-#endif  // AUDIO_CHANNEL_RECEIVE_PROXY_H_
diff --git a/audio/channel_send.cc b/audio/channel_send.cc
index c0de939..c458fe4 100644
--- a/audio/channel_send.cc
+++ b/audio/channel_send.cc
@@ -19,12 +19,15 @@
 
 #include "absl/memory/memory.h"
 #include "api/array_view.h"
+#include "api/call/transport.h"
 #include "api/crypto/frameencryptorinterface.h"
 #include "audio/utility/audio_frame_operations.h"
 #include "call/rtp_transport_controller_send_interface.h"
 #include "logging/rtc_event_log/events/rtc_event_audio_playout.h"
 #include "logging/rtc_event_log/rtc_event_log.h"
 #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_processing/rms_level.h"
 #include "modules/pacing/packet_router.h"
 #include "modules/utility/include/process_thread.h"
 #include "rtc_base/checks.h"
@@ -33,6 +36,8 @@
 #include "rtc_base/format_macros.h"
 #include "rtc_base/location.h"
 #include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/race_checker.h"
 #include "rtc_base/rate_limiter.h"
 #include "rtc_base/task_queue.h"
 #include "rtc_base/thread_checker.h"
@@ -66,7 +71,241 @@
   }
 }
 
-}  // namespace
+class RtpPacketSenderProxy;
+class TransportFeedbackProxy;
+class TransportSequenceNumberProxy;
+class VoERtcpObserver;
+
+class ChannelSend
+    : public ChannelSendInterface,
+      public Transport,
+      public OverheadObserver,
+      public AudioPacketizationCallback,  // receive encoded packets from the
+                                          // ACM
+      public TargetTransferRateObserver {
+ public:
+  // TODO(nisse): Make OnUplinkPacketLossRate public, and delete friend
+  // declaration.
+  friend class VoERtcpObserver;
+
+  ChannelSend(rtc::TaskQueue* encoder_queue,
+              ProcessThread* module_process_thread,
+              MediaTransportInterface* media_transport,
+              Transport* rtp_transport,
+              RtcpRttStats* rtcp_rtt_stats,
+              RtcEventLog* rtc_event_log,
+              FrameEncryptorInterface* frame_encryptor,
+              const webrtc::CryptoOptions& crypto_options,
+              bool extmap_allow_mixed,
+              int rtcp_report_interval_ms);
+
+  ~ChannelSend() override;
+
+  // Send using this encoder, with this payload type.
+  bool SetEncoder(int payload_type,
+                  std::unique_ptr<AudioEncoder> encoder) override;
+  void ModifyEncoder(rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)>
+                         modifier) override;
+
+  // API methods
+  void StartSend() override;
+  void StopSend() override;
+
+  // Codecs
+  void OnBitrateAllocation(BitrateAllocationUpdate update) override;
+  int GetBitrate() const override;
+
+  // Network
+  bool ReceivedRTCPPacket(const uint8_t* data, size_t length) override;
+
+  // Muting, Volume and Level.
+  void SetInputMute(bool enable) override;
+
+  // Stats.
+  ANAStats GetANAStatistics() const override;
+
+  // Used by AudioSendStream.
+  RtpRtcp* GetRtpRtcp() const override;
+
+  // DTMF.
+  bool SendTelephoneEventOutband(int event, int duration_ms) override;
+  bool SetSendTelephoneEventPayloadType(int payload_type,
+                                        int payload_frequency) override;
+
+  // RTP+RTCP
+  void SetLocalSSRC(uint32_t ssrc) override;
+  void SetMid(const std::string& mid, int extension_id) override;
+  void SetExtmapAllowMixed(bool extmap_allow_mixed) override;
+  void SetSendAudioLevelIndicationStatus(bool enable, int id) override;
+  void EnableSendTransportSequenceNumber(int id) override;
+
+  void RegisterSenderCongestionControlObjects(
+      RtpTransportControllerSendInterface* transport,
+      RtcpBandwidthObserver* bandwidth_observer) override;
+  void ResetSenderCongestionControlObjects() override;
+  void SetRTCP_CNAME(absl::string_view c_name) override;
+  std::vector<ReportBlock> GetRemoteRTCPReportBlocks() const override;
+  CallSendStatistics GetRTCPStatistics() const override;
+
+  // ProcessAndEncodeAudio() posts a task on the shared encoder task queue,
+  // which in turn calls (on the queue) ProcessAndEncodeAudioOnTaskQueue() where
+  // the actual processing of the audio takes place. The processing mainly
+  // consists of encoding and preparing the result for sending by adding it to a
+  // send queue.
+  // The main reason for using a task queue here is to release the native,
+  // OS-specific, audio capture thread as soon as possible to ensure that it
+  // can go back to sleep and be prepared to deliver an new captured audio
+  // packet.
+  void ProcessAndEncodeAudio(std::unique_ptr<AudioFrame> audio_frame) override;
+
+  void SetTransportOverhead(size_t transport_overhead_per_packet) override;
+
+  // The existence of this function alongside OnUplinkPacketLossRate is
+  // a compromise. We want the encoder to be agnostic of the PLR source, but
+  // we also don't want it to receive conflicting information from TWCC and
+  // from RTCP-XR.
+  void OnTwccBasedUplinkPacketLossRate(float packet_loss_rate) override;
+
+  void OnRecoverableUplinkPacketLossRate(
+      float recoverable_packet_loss_rate) override;
+
+  int64_t GetRTT() const override;
+
+  // E2EE Custom Audio Frame Encryption
+  void SetFrameEncryptor(
+      rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor) override;
+
+ private:
+  class ProcessAndEncodeAudioTask;
+
+  // From AudioPacketizationCallback in the ACM
+  int32_t SendData(FrameType frameType,
+                   uint8_t payloadType,
+                   uint32_t timeStamp,
+                   const uint8_t* payloadData,
+                   size_t payloadSize,
+                   const RTPFragmentationHeader* fragmentation) override;
+
+  // From Transport (called by the RTP/RTCP module)
+  bool SendRtp(const uint8_t* data,
+               size_t len,
+               const PacketOptions& packet_options) override;
+  bool SendRtcp(const uint8_t* data, size_t len) override;
+
+  // From OverheadObserver in the RTP/RTCP module
+  void OnOverheadChanged(size_t overhead_bytes_per_packet) override;
+
+  void OnUplinkPacketLossRate(float packet_loss_rate);
+  bool InputMute() const;
+
+  int SetSendRtpHeaderExtension(bool enable, RTPExtensionType type, int id);
+
+  void UpdateOverheadForEncoder()
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(overhead_per_packet_lock_);
+
+  int32_t SendRtpAudio(FrameType frameType,
+                       uint8_t payloadType,
+                       uint32_t timeStamp,
+                       rtc::ArrayView<const uint8_t> payload,
+                       const RTPFragmentationHeader* fragmentation);
+
+  int32_t SendMediaTransportAudio(FrameType frameType,
+                                  uint8_t payloadType,
+                                  uint32_t timeStamp,
+                                  rtc::ArrayView<const uint8_t> payload,
+                                  const RTPFragmentationHeader* fragmentation);
+
+  // Return media transport or nullptr if using RTP.
+  MediaTransportInterface* media_transport() { return media_transport_; }
+
+  // Called on the encoder task queue when a new input audio frame is ready
+  // for encoding.
+  void ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input);
+
+  void OnReceivedRtt(int64_t rtt_ms);
+
+  void OnTargetTransferRate(TargetTransferRate) override;
+
+  // Thread checkers document and lock usage of some methods on voe::Channel to
+  // specific threads we know about. The goal is to eventually split up
+  // voe::Channel into parts with single-threaded semantics, and thereby reduce
+  // the need for locks.
+  rtc::ThreadChecker worker_thread_checker_;
+  rtc::ThreadChecker module_process_thread_checker_;
+  // Methods accessed from audio and video threads are checked for sequential-
+  // only access. We don't necessarily own and control these threads, so thread
+  // checkers cannot be used. E.g. Chromium may transfer "ownership" from one
+  // audio thread to another, but access is still sequential.
+  rtc::RaceChecker audio_thread_race_checker_;
+
+  rtc::CriticalSection _callbackCritSect;
+  rtc::CriticalSection volume_settings_critsect_;
+
+  bool sending_ RTC_GUARDED_BY(&worker_thread_checker_) = false;
+
+  RtcEventLog* const event_log_;
+
+  std::unique_ptr<RtpRtcp> _rtpRtcpModule;
+
+  std::unique_ptr<AudioCodingModule> audio_coding_;
+  uint32_t _timeStamp RTC_GUARDED_BY(encoder_queue_);
+
+  uint16_t send_sequence_number_;
+
+  // uses
+  ProcessThread* const _moduleProcessThreadPtr;
+  Transport* const _transportPtr;  // WebRtc socket or external transport
+  RmsLevel rms_level_ RTC_GUARDED_BY(encoder_queue_);
+  bool input_mute_ RTC_GUARDED_BY(volume_settings_critsect_);
+  bool previous_frame_muted_ RTC_GUARDED_BY(encoder_queue_);
+  // VoeRTP_RTCP
+  // TODO(henrika): can today be accessed on the main thread and on the
+  // task queue; hence potential race.
+  bool _includeAudioLevelIndication;
+  size_t transport_overhead_per_packet_
+      RTC_GUARDED_BY(overhead_per_packet_lock_);
+  size_t rtp_overhead_per_packet_ RTC_GUARDED_BY(overhead_per_packet_lock_);
+  rtc::CriticalSection overhead_per_packet_lock_;
+  // RtcpBandwidthObserver
+  const std::unique_ptr<VoERtcpObserver> rtcp_observer_;
+
+  PacketRouter* packet_router_ RTC_GUARDED_BY(&worker_thread_checker_) =
+      nullptr;
+  const std::unique_ptr<TransportFeedbackProxy> feedback_observer_proxy_;
+  const std::unique_ptr<TransportSequenceNumberProxy> seq_num_allocator_proxy_;
+  const std::unique_ptr<RtpPacketSenderProxy> rtp_packet_sender_proxy_;
+  const std::unique_ptr<RateLimiter> retransmission_rate_limiter_;
+
+  rtc::ThreadChecker construction_thread_;
+
+  const bool use_twcc_plr_for_ana_;
+
+  rtc::CriticalSection encoder_queue_lock_;
+  bool encoder_queue_is_active_ RTC_GUARDED_BY(encoder_queue_lock_) = false;
+  rtc::TaskQueue* const encoder_queue_ = nullptr;
+
+  MediaTransportInterface* const media_transport_;
+  int media_transport_sequence_number_ RTC_GUARDED_BY(encoder_queue_) = 0;
+
+  rtc::CriticalSection media_transport_lock_;
+  // Currently set by SetLocalSSRC.
+  uint64_t media_transport_channel_id_ RTC_GUARDED_BY(&media_transport_lock_) =
+      0;
+  // Cache payload type and sampling frequency from most recent call to
+  // SetEncoder. Needed to set MediaTransportEncodedAudioFrame metadata, and
+  // invalidate on encoder change.
+  int media_transport_payload_type_ RTC_GUARDED_BY(&media_transport_lock_);
+  int media_transport_sampling_frequency_
+      RTC_GUARDED_BY(&media_transport_lock_);
+
+  // E2EE Audio Frame Encryption
+  rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor_;
+  // E2EE Frame Encryption Options
+  const webrtc::CryptoOptions crypto_options_;
+
+  rtc::CriticalSection bitrate_crit_section_;
+  int configured_bitrate_bps_ RTC_GUARDED_BY(bitrate_crit_section_) = 0;
+};
 
 const int kTelephoneEventAttenuationdB = 10;
 
@@ -441,26 +680,22 @@
   return true;
 }
 
-int ChannelSend::PreferredSampleRate() const {
-  // Return the bigger of playout and receive frequency in the ACM.
-  return std::max(audio_coding_->ReceiveFrequency(),
-                  audio_coding_->PlayoutFrequency());
-}
-
 ChannelSend::ChannelSend(rtc::TaskQueue* encoder_queue,
                          ProcessThread* module_process_thread,
                          MediaTransportInterface* media_transport,
+                         Transport* rtp_transport,
                          RtcpRttStats* rtcp_rtt_stats,
                          RtcEventLog* rtc_event_log,
                          FrameEncryptorInterface* frame_encryptor,
                          const webrtc::CryptoOptions& crypto_options,
-                         bool extmap_allow_mixed)
+                         bool extmap_allow_mixed,
+                         int rtcp_report_interval_ms)
     : event_log_(rtc_event_log),
       _timeStamp(0),  // This is just an offset, RTP module will add it's own
                       // random offset
       send_sequence_number_(0),
       _moduleProcessThreadPtr(module_process_thread),
-      _transportPtr(NULL),
+      _transportPtr(rtp_transport),
       input_mute_(false),
       previous_frame_muted_(false),
       _includeAudioLevelIndication(false),
@@ -480,47 +715,51 @@
       crypto_options_(crypto_options) {
   RTC_DCHECK(module_process_thread);
   RTC_DCHECK(encoder_queue);
+  module_process_thread_checker_.DetachFromThread();
+
   audio_coding_.reset(AudioCodingModule::Create(AudioCodingModule::Config()));
 
   RtpRtcp::Configuration configuration;
+
+  // We gradually remove codepaths that depend on RTP when using media
+  // transport. All of this logic should be moved to the future
+  // RTPMediaTransport. In this case it means that overhead and bandwidth
+  // observers should not be called when using media transport.
+  if (!media_transport_) {
+    configuration.overhead_observer = this;
+    configuration.bandwidth_callback = rtcp_observer_.get();
+    configuration.transport_feedback_callback = feedback_observer_proxy_.get();
+  }
+
   configuration.audio = true;
   configuration.outgoing_transport = this;
-  configuration.overhead_observer = this;
-  configuration.bandwidth_callback = rtcp_observer_.get();
 
   configuration.paced_sender = rtp_packet_sender_proxy_.get();
   configuration.transport_sequence_number_allocator =
       seq_num_allocator_proxy_.get();
-  configuration.transport_feedback_callback = feedback_observer_proxy_.get();
 
   configuration.event_log = event_log_;
   configuration.rtt_stats = rtcp_rtt_stats;
   configuration.retransmission_rate_limiter =
       retransmission_rate_limiter_.get();
   configuration.extmap_allow_mixed = extmap_allow_mixed;
+  configuration.rtcp_report_interval_ms = rtcp_report_interval_ms;
 
   _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
   _rtpRtcpModule->SetSendingMediaStatus(false);
-  Init();
-}
 
-ChannelSend::~ChannelSend() {
-  Terminate();
-  RTC_DCHECK(!channel_state_.Get().sending);
-}
+  // We want to invoke the 'TargetRateObserver' and |OnOverheadChanged|
+  // callbacks after the audio_coding_ is fully initialized.
+  if (media_transport_) {
+    RTC_DLOG(LS_INFO) << "Setting media_transport_ rate observers.";
+    media_transport_->AddTargetTransferRateObserver(this);
+    OnOverheadChanged(media_transport_->GetAudioPacketOverhead());
+  } else {
+    RTC_DLOG(LS_INFO) << "Not setting media_transport_ rate observers.";
+  }
 
-void ChannelSend::Init() {
-  channel_state_.Reset();
-
-  // --- Add modules to process thread (for periodic schedulation)
   _moduleProcessThreadPtr->RegisterModule(_rtpRtcpModule.get(), RTC_FROM_HERE);
 
-  // --- ACM initialization
-  int error = audio_coding_->InitializeReceiver();
-  RTC_DCHECK_EQ(0, error);
-
-  // --- RTP/RTCP module initialization
-
   // Ensure that RTCP is enabled by default for the created channel.
   // Note that, the module will keep generating RTCP until it is explicitly
   // disabled by the user.
@@ -529,36 +768,30 @@
   // RTCP is enabled by default.
   _rtpRtcpModule->SetRTCPStatus(RtcpMode::kCompound);
 
-  // --- Register all permanent callbacks
-  error = audio_coding_->RegisterTransportCallback(this);
+  int error = audio_coding_->RegisterTransportCallback(this);
   RTC_DCHECK_EQ(0, error);
 }
 
-void ChannelSend::Terminate() {
+ChannelSend::~ChannelSend() {
   RTC_DCHECK(construction_thread_.CalledOnValidThread());
-  // Must be called on the same thread as Init().
+
+  if (media_transport_) {
+    media_transport_->RemoveTargetTransferRateObserver(this);
+  }
 
   StopSend();
 
-  // The order to safely shutdown modules in a channel is:
-  // 1. De-register callbacks in modules
-  // 2. De-register modules in process thread
-  // 3. Destroy modules
   int error = audio_coding_->RegisterTransportCallback(NULL);
   RTC_DCHECK_EQ(0, error);
 
-  // De-register modules in process thread
   if (_moduleProcessThreadPtr)
     _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get());
-
-  // End of modules shutdown
 }
 
-int32_t ChannelSend::StartSend() {
-  if (channel_state_.Get().sending) {
-    return 0;
-  }
-  channel_state_.SetSending(true);
+void ChannelSend::StartSend() {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  RTC_DCHECK(!sending_);
+  sending_ = true;
 
   // Resume the previous sequence number which was reset by StopSend(). This
   // needs to be done before |sending| is set to true on the RTP/RTCP module.
@@ -566,26 +799,21 @@
     _rtpRtcpModule->SetSequenceNumber(send_sequence_number_);
   }
   _rtpRtcpModule->SetSendingMediaStatus(true);
-  if (_rtpRtcpModule->SetSendingStatus(true) != 0) {
-    RTC_DLOG(LS_ERROR) << "StartSend() RTP/RTCP failed to start sending";
-    _rtpRtcpModule->SetSendingMediaStatus(false);
-    rtc::CritScope cs(&_callbackCritSect);
-    channel_state_.SetSending(false);
-    return -1;
-  }
+  int ret = _rtpRtcpModule->SetSendingStatus(true);
+  RTC_DCHECK_EQ(0, ret);
   {
     // It is now OK to start posting tasks to the encoder task queue.
     rtc::CritScope cs(&encoder_queue_lock_);
     encoder_queue_is_active_ = true;
   }
-  return 0;
 }
 
 void ChannelSend::StopSend() {
-  if (!channel_state_.Get().sending) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  if (!sending_) {
     return;
   }
-  channel_state_.SetSending(false);
+  sending_ = false;
 
   // Post a task to the encoder thread which sets an event when the task is
   // executed. We know that no more encoding tasks will be added to the task
@@ -623,6 +851,7 @@
 
 bool ChannelSend::SetEncoder(int payload_type,
                              std::unique_ptr<AudioEncoder> encoder) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   RTC_DCHECK_GE(payload_type, 0);
   RTC_DCHECK_LE(payload_type, 127);
   // TODO(ossu): Make CodecInsts up, for now: one for the RTP/RTCP module and
@@ -666,24 +895,35 @@
 
 void ChannelSend::ModifyEncoder(
     rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   audio_coding_->ModifyEncoder(modifier);
 }
 
-void ChannelSend::SetBitRate(int bitrate_bps, int64_t probing_interval_ms) {
+void ChannelSend::OnBitrateAllocation(BitrateAllocationUpdate update) {
+  // This method can be called on the worker thread, module process thread
+  // or on a TaskQueue via VideoSendStreamImpl::OnEncoderConfigurationChanged.
+  // TODO(solenberg): Figure out a good way to check this or enforce calling
+  // rules.
+  // RTC_DCHECK(worker_thread_checker_.CalledOnValidThread() ||
+  //            module_process_thread_checker_.CalledOnValidThread());
+  rtc::CritScope lock(&bitrate_crit_section_);
+
   audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
     if (*encoder) {
-      (*encoder)->OnReceivedUplinkBandwidth(bitrate_bps, probing_interval_ms);
+      (*encoder)->OnReceivedUplinkAllocation(update);
     }
   });
-  retransmission_rate_limiter_->SetMaxRate(bitrate_bps);
-  configured_bitrate_bps_ = bitrate_bps;
+  retransmission_rate_limiter_->SetMaxRate(update.target_bitrate.bps());
+  configured_bitrate_bps_ = update.target_bitrate.bps();
 }
 
-int ChannelSend::GetBitRate() const {
+int ChannelSend::GetBitrate() const {
+  rtc::CritScope lock(&bitrate_crit_section_);
   return configured_bitrate_bps_;
 }
 
 void ChannelSend::OnTwccBasedUplinkPacketLossRate(float packet_loss_rate) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   if (!use_twcc_plr_for_ana_)
     return;
   audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
@@ -695,6 +935,7 @@
 
 void ChannelSend::OnRecoverableUplinkPacketLossRate(
     float recoverable_packet_loss_rate) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
     if (*encoder) {
       (*encoder)->OnReceivedUplinkRecoverablePacketLossFraction(
@@ -713,47 +954,22 @@
   });
 }
 
-bool ChannelSend::EnableAudioNetworkAdaptor(const std::string& config_string) {
-  bool success = false;
-  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
-    if (*encoder) {
-      success =
-          (*encoder)->EnableAudioNetworkAdaptor(config_string, event_log_);
-    }
-  });
-  return success;
-}
+// TODO(nisse): Delete always-true return value.
+bool ChannelSend::ReceivedRTCPPacket(const uint8_t* data, size_t length) {
+  // May be called on either worker thread or network thread.
+  if (media_transport_) {
+    // Ignore RTCP packets while media transport is used.
+    // Those packets should not arrive, but we are seeing occasional packets.
+    return 0;
+  }
 
-void ChannelSend::DisableAudioNetworkAdaptor() {
-  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
-    if (*encoder)
-      (*encoder)->DisableAudioNetworkAdaptor();
-  });
-}
-
-void ChannelSend::SetReceiverFrameLengthRange(int min_frame_length_ms,
-                                              int max_frame_length_ms) {
-  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
-    if (*encoder) {
-      (*encoder)->SetReceiverFrameLengthRange(min_frame_length_ms,
-                                              max_frame_length_ms);
-    }
-  });
-}
-
-void ChannelSend::RegisterTransport(Transport* transport) {
-  rtc::CritScope cs(&_callbackCritSect);
-  _transportPtr = transport;
-}
-
-int32_t ChannelSend::ReceivedRTCPPacket(const uint8_t* data, size_t length) {
   // Deliver RTCP packet to RTP/RTCP module for parsing
   _rtpRtcpModule->IncomingRtcpPacket(data, length);
 
   int64_t rtt = GetRTT();
   if (rtt == 0) {
     // Waiting for valid RTT.
-    return 0;
+    return true;
   }
 
   int64_t nack_window_ms = rtt;
@@ -764,16 +980,12 @@
   }
   retransmission_rate_limiter_->SetWindowSize(nack_window_ms);
 
-  // Invoke audio encoders OnReceivedRtt().
-  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
-    if (*encoder)
-      (*encoder)->OnReceivedRtt(rtt);
-  });
-
-  return 0;
+  OnReceivedRtt(rtt);
+  return true;
 }
 
 void ChannelSend::SetInputMute(bool enable) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   rtc::CritScope cs(&volume_settings_critsect_);
   input_mute_ = enable;
 }
@@ -783,24 +995,26 @@
   return input_mute_;
 }
 
-int ChannelSend::SendTelephoneEventOutband(int event, int duration_ms) {
+bool ChannelSend::SendTelephoneEventOutband(int event, int duration_ms) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   RTC_DCHECK_LE(0, event);
   RTC_DCHECK_GE(255, event);
   RTC_DCHECK_LE(0, duration_ms);
   RTC_DCHECK_GE(65535, duration_ms);
-  if (!Sending()) {
-    return -1;
+  if (!sending_) {
+    return false;
   }
   if (_rtpRtcpModule->SendTelephoneEventOutband(
           event, duration_ms, kTelephoneEventAttenuationdB) != 0) {
     RTC_DLOG(LS_ERROR) << "SendTelephoneEventOutband() failed to send event";
-    return -1;
+    return false;
   }
-  return 0;
+  return true;
 }
 
-int ChannelSend::SetSendTelephoneEventPayloadType(int payload_type,
-                                                  int payload_frequency) {
+bool ChannelSend::SetSendTelephoneEventPayloadType(int payload_type,
+                                                   int payload_frequency) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   RTC_DCHECK_LE(0, payload_type);
   RTC_DCHECK_GE(127, payload_type);
   CodecInst codec = {0};
@@ -813,42 +1027,44 @@
       RTC_DLOG(LS_ERROR)
           << "SetSendTelephoneEventPayloadType() failed to register "
              "send payload type";
-      return -1;
+      return false;
     }
   }
-  return 0;
+  return true;
 }
 
-int ChannelSend::SetLocalSSRC(unsigned int ssrc) {
-  if (channel_state_.Get().sending) {
-    RTC_DLOG(LS_ERROR) << "SetLocalSSRC() already sending";
-    return -1;
-  }
+void ChannelSend::SetLocalSSRC(uint32_t ssrc) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  RTC_DCHECK(!sending_);
+
   if (media_transport_) {
     rtc::CritScope cs(&media_transport_lock_);
     media_transport_channel_id_ = ssrc;
   }
   _rtpRtcpModule->SetSSRC(ssrc);
-  return 0;
 }
 
 void ChannelSend::SetMid(const std::string& mid, int extension_id) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   int ret = SetSendRtpHeaderExtension(true, kRtpExtensionMid, extension_id);
   RTC_DCHECK_EQ(0, ret);
   _rtpRtcpModule->SetMid(mid);
 }
 
 void ChannelSend::SetExtmapAllowMixed(bool extmap_allow_mixed) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   _rtpRtcpModule->SetExtmapAllowMixed(extmap_allow_mixed);
 }
 
-int ChannelSend::SetSendAudioLevelIndicationStatus(bool enable,
-                                                   unsigned char id) {
+void ChannelSend::SetSendAudioLevelIndicationStatus(bool enable, int id) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   _includeAudioLevelIndication = enable;
-  return SetSendRtpHeaderExtension(enable, kRtpExtensionAudioLevel, id);
+  int ret = SetSendRtpHeaderExtension(enable, kRtpExtensionAudioLevel, id);
+  RTC_DCHECK_EQ(0, ret);
 }
 
 void ChannelSend::EnableSendTransportSequenceNumber(int id) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   int ret =
       SetSendRtpHeaderExtension(true, kRtpExtensionTransportSequenceNumber, id);
   RTC_DCHECK_EQ(0, ret);
@@ -857,6 +1073,7 @@
 void ChannelSend::RegisterSenderCongestionControlObjects(
     RtpTransportControllerSendInterface* transport,
     RtcpBandwidthObserver* bandwidth_observer) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   RtpPacketSender* rtp_packet_sender = transport->packet_sender();
   TransportFeedbackObserver* transport_feedback_observer =
       transport->transport_feedback_observer();
@@ -878,6 +1095,7 @@
 }
 
 void ChannelSend::ResetSenderCongestionControlObjects() {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   RTC_DCHECK(packet_router_);
   _rtpRtcpModule->SetStorePacketsStatus(false, 600);
   rtcp_observer_->SetBandwidthObserver(nullptr);
@@ -888,35 +1106,25 @@
   rtp_packet_sender_proxy_->SetPacketSender(nullptr);
 }
 
-void ChannelSend::SetRTCPStatus(bool enable) {
-  _rtpRtcpModule->SetRTCPStatus(enable ? RtcpMode::kCompound : RtcpMode::kOff);
+void ChannelSend::SetRTCP_CNAME(absl::string_view c_name) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  // Note: SetCNAME() accepts a c string of length at most 255.
+  const std::string c_name_limited(c_name.substr(0, 255));
+  int ret = _rtpRtcpModule->SetCNAME(c_name_limited.c_str()) != 0;
+  RTC_DCHECK_EQ(0, ret) << "SetRTCP_CNAME() failed to set RTCP CNAME";
 }
 
-int ChannelSend::SetRTCP_CNAME(const char cName[256]) {
-  if (_rtpRtcpModule->SetCNAME(cName) != 0) {
-    RTC_DLOG(LS_ERROR) << "SetRTCP_CNAME() failed to set RTCP CNAME";
-    return -1;
-  }
-  return 0;
-}
-
-int ChannelSend::GetRemoteRTCPReportBlocks(
-    std::vector<ReportBlock>* report_blocks) {
-  if (report_blocks == NULL) {
-    RTC_DLOG(LS_ERROR) << "GetRemoteRTCPReportBlock()s invalid report_blocks.";
-    return -1;
-  }
-
+std::vector<ReportBlock> ChannelSend::GetRemoteRTCPReportBlocks() const {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   // Get the report blocks from the latest received RTCP Sender or Receiver
   // Report. Each element in the vector contains the sender's SSRC and a
   // report block according to RFC 3550.
   std::vector<RTCPReportBlock> rtcp_report_blocks;
-  if (_rtpRtcpModule->RemoteRTCPStat(&rtcp_report_blocks) != 0) {
-    return -1;
-  }
 
-  if (rtcp_report_blocks.empty())
-    return 0;
+  int ret = _rtpRtcpModule->RemoteRTCPStat(&rtcp_report_blocks);
+  RTC_DCHECK_EQ(0, ret);
+
+  std::vector<ReportBlock> report_blocks;
 
   std::vector<RTCPReportBlock>::const_iterator it = rtcp_report_blocks.begin();
   for (; it != rtcp_report_blocks.end(); ++it) {
@@ -930,19 +1138,16 @@
     report_block.interarrival_jitter = it->jitter;
     report_block.last_SR_timestamp = it->last_sender_report_timestamp;
     report_block.delay_since_last_SR = it->delay_since_last_sender_report;
-    report_blocks->push_back(report_block);
+    report_blocks.push_back(report_block);
   }
-  return 0;
+  return report_blocks;
 }
 
-int ChannelSend::GetRTPStatistics(CallSendStatistics& stats) {
-  // --- RtcpStatistics
-
-  // --- RTT
+CallSendStatistics ChannelSend::GetRTCPStatistics() const {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  CallSendStatistics stats = {0};
   stats.rttMs = GetRTT();
 
-  // --- Data counters
-
   size_t bytesSent(0);
   uint32_t packetsSent(0);
 
@@ -955,24 +1160,12 @@
   stats.bytesSent = bytesSent;
   stats.packetsSent = packetsSent;
 
-  return 0;
-}
-
-void ChannelSend::SetNACKStatus(bool enable, int maxNumberOfPackets) {
-  // None of these functions can fail.
-  if (enable)
-    audio_coding_->EnableNack(maxNumberOfPackets);
-  else
-    audio_coding_->DisableNack();
-}
-
-// Called when we are missing one or more packets.
-int ChannelSend::ResendPackets(const uint16_t* sequence_numbers, int length) {
-  return _rtpRtcpModule->SendNACK(sequence_numbers, length);
+  return stats;
 }
 
 void ChannelSend::ProcessAndEncodeAudio(
     std::unique_ptr<AudioFrame> audio_frame) {
+  RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
   // Avoid posting any new tasks if sending was already stopped in StopSend().
   rtc::CritScope cs(&encoder_queue_lock_);
   if (!encoder_queue_is_active_) {
@@ -1038,6 +1231,7 @@
 }
 
 void ChannelSend::SetTransportOverhead(size_t transport_overhead_per_packet) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   rtc::CritScope cs(&overhead_per_packet_lock_);
   transport_overhead_per_packet_ = transport_overhead_per_packet;
   UpdateOverheadForEncoder();
@@ -1051,36 +1245,42 @@
 }
 
 ANAStats ChannelSend::GetANAStatistics() const {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   return audio_coding_->GetANAStats();
 }
 
 RtpRtcp* ChannelSend::GetRtpRtcp() const {
+  RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
   return _rtpRtcpModule.get();
 }
 
 int ChannelSend::SetSendRtpHeaderExtension(bool enable,
                                            RTPExtensionType type,
-                                           unsigned char id) {
+                                           int id) {
   int error = 0;
   _rtpRtcpModule->DeregisterSendRtpHeaderExtension(type);
   if (enable) {
-    error = _rtpRtcpModule->RegisterSendRtpHeaderExtension(type, id);
+    // TODO(nisse): RtpRtcp::RegisterSendRtpHeaderExtension to take an int
+    // argument. Currently it wants an uint8_t.
+    error = _rtpRtcpModule->RegisterSendRtpHeaderExtension(
+        type, rtc::dchecked_cast<uint8_t>(id));
   }
   return error;
 }
 
-int ChannelSend::GetRtpTimestampRateHz() const {
-  const auto format = audio_coding_->ReceiveFormat();
-  // Default to the playout frequency if we've not gotten any packets yet.
-  // TODO(ossu): Zero clockrate can only happen if we've added an external
-  // decoder for a format we don't support internally. Remove once that way of
-  // adding decoders is gone!
-  return (format && format->clockrate_hz != 0)
-             ? format->clockrate_hz
-             : audio_coding_->PlayoutFrequency();
-}
-
 int64_t ChannelSend::GetRTT() const {
+  if (media_transport_) {
+    // GetRTT is generally used in the RTCP codepath, where media transport is
+    // not present and so it shouldn't be needed. But it's also invoked in
+    // 'GetStats' method, and for now returning media transport RTT here gives
+    // us "free" rtt stats for media transport.
+    auto target_rate = media_transport_->GetLatestTargetTransferRate();
+    if (target_rate.has_value()) {
+      return target_rate.value().network_estimate.round_trip_time.ms();
+    }
+
+    return 0;
+  }
   RtcpMode method = _rtpRtcpModule->RTCP();
   if (method == RtcpMode::kOff) {
     return 0;
@@ -1107,6 +1307,7 @@
 
 void ChannelSend::SetFrameEncryptor(
     rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   rtc::CritScope cs(&encoder_queue_lock_);
   if (encoder_queue_is_active_) {
     encoder_queue_->PostTask([this, frame_encryptor]() {
@@ -1117,5 +1318,39 @@
   }
 }
 
+void ChannelSend::OnTargetTransferRate(TargetTransferRate rate) {
+  RTC_DCHECK(media_transport_);
+  OnReceivedRtt(rate.network_estimate.round_trip_time.ms());
+}
+
+void ChannelSend::OnReceivedRtt(int64_t rtt_ms) {
+  // Invoke audio encoders OnReceivedRtt().
+  audio_coding_->ModifyEncoder(
+      [rtt_ms](std::unique_ptr<AudioEncoder>* encoder) {
+        if (*encoder) {
+          (*encoder)->OnReceivedRtt(rtt_ms);
+        }
+      });
+}
+
+}  // namespace
+
+std::unique_ptr<ChannelSendInterface> CreateChannelSend(
+    rtc::TaskQueue* encoder_queue,
+    ProcessThread* module_process_thread,
+    MediaTransportInterface* media_transport,
+    Transport* rtp_transport,
+    RtcpRttStats* rtcp_rtt_stats,
+    RtcEventLog* rtc_event_log,
+    FrameEncryptorInterface* frame_encryptor,
+    const webrtc::CryptoOptions& crypto_options,
+    bool extmap_allow_mixed,
+    int rtcp_report_interval_ms) {
+  return absl::make_unique<ChannelSend>(
+      encoder_queue, module_process_thread, media_transport, rtp_transport,
+      rtcp_rtt_stats, rtc_event_log, frame_encryptor, crypto_options,
+      extmap_allow_mixed, rtcp_report_interval_ms);
+}
+
 }  // namespace voe
 }  // namespace webrtc
diff --git a/audio/channel_send.h b/audio/channel_send.h
index 407303f..083e9a6 100644
--- a/audio/channel_send.h
+++ b/audio/channel_send.h
@@ -11,44 +11,26 @@
 #ifndef AUDIO_CHANNEL_SEND_H_
 #define AUDIO_CHANNEL_SEND_H_
 
-#include <map>
 #include <memory>
 #include <string>
 #include <vector>
 
 #include "api/audio/audio_frame.h"
 #include "api/audio_codecs/audio_encoder.h"
-#include "api/call/transport.h"
 #include "api/crypto/cryptooptions.h"
 #include "api/media_transport_interface.h"
-#include "common_types.h"  // NOLINT(build/include)
-#include "modules/audio_coding/include/audio_coding_module.h"
-#include "modules/audio_processing/rms_level.h"
 #include "modules/rtp_rtcp/include/rtp_rtcp.h"
-#include "rtc_base/criticalsection.h"
+#include "rtc_base/function_view.h"
 #include "rtc_base/task_queue.h"
-#include "rtc_base/thread_checker.h"
-
-// TODO(solenberg, nisse): This file contains a few NOLINT marks, to silence
-// warnings about use of unsigned short, and non-const reference arguments.
-// These need cleanup, in a separate cl.
-
-namespace rtc {
-class TimestampWrapAroundHandler;
-}
 
 namespace webrtc {
 
 class FrameEncryptorInterface;
-class PacketRouter;
 class ProcessThread;
-class RateLimiter;
 class RtcEventLog;
 class RtpRtcp;
 class RtpTransportControllerSendInterface;
 
-struct SenderInfo;
-
 struct CallSendStatistics {
   int64_t rttMs;
   size_t bytesSent;
@@ -69,277 +51,77 @@
 
 namespace voe {
 
-class RtpPacketSenderProxy;
-class TransportFeedbackProxy;
-class TransportSequenceNumberProxy;
-class VoERtcpObserver;
-
-// Helper class to simplify locking scheme for members that are accessed from
-// multiple threads.
-// Example: a member can be set on thread T1 and read by an internal audio
-// thread T2. Accessing the member via this class ensures that we are
-// safe and also avoid TSan v2 warnings.
-class ChannelSendState {
+class ChannelSendInterface {
  public:
-  struct State {
-    bool sending = false;
-  };
+  virtual ~ChannelSendInterface() = default;
 
-  ChannelSendState() {}
-  virtual ~ChannelSendState() {}
+  virtual bool ReceivedRTCPPacket(const uint8_t* packet, size_t length) = 0;
 
-  void Reset() {
-    rtc::CritScope lock(&lock_);
-    state_ = State();
-  }
+  virtual CallSendStatistics GetRTCPStatistics() const = 0;
 
-  State Get() const {
-    rtc::CritScope lock(&lock_);
-    return state_;
-  }
+  virtual bool SetEncoder(int payload_type,
+                          std::unique_ptr<AudioEncoder> encoder) = 0;
+  virtual void ModifyEncoder(
+      rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) = 0;
 
-  void SetSending(bool enable) {
-    rtc::CritScope lock(&lock_);
-    state_.sending = enable;
-  }
-
- private:
-  rtc::CriticalSection lock_;
-  State state_;
-};
-
-class ChannelSend
-    : public Transport,
-      public AudioPacketizationCallback,  // receive encoded packets from the
-                                          // ACM
-      public OverheadObserver {
- public:
-  // TODO(nisse): Make OnUplinkPacketLossRate public, and delete friend
-  // declaration.
-  friend class VoERtcpObserver;
-
-  ChannelSend(rtc::TaskQueue* encoder_queue,
-              ProcessThread* module_process_thread,
-              MediaTransportInterface* media_transport,
-              RtcpRttStats* rtcp_rtt_stats,
-              RtcEventLog* rtc_event_log,
-              FrameEncryptorInterface* frame_encryptor,
-              const webrtc::CryptoOptions& crypto_options,
-              bool extmap_allow_mixed);
-
-  virtual ~ChannelSend();
-
-  // Send using this encoder, with this payload type.
-  bool SetEncoder(int payload_type, std::unique_ptr<AudioEncoder> encoder);
-  void ModifyEncoder(
-      rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier);
-
-  // API methods
-
-  // VoEBase
-  int32_t StartSend();
-  void StopSend();
-
-  // Codecs
-  void SetBitRate(int bitrate_bps, int64_t probing_interval_ms);
-  int GetBitRate() const;
-  bool EnableAudioNetworkAdaptor(const std::string& config_string);
-  void DisableAudioNetworkAdaptor();
-
-  // TODO(nisse): Modifies decoder, but not used?
-  void SetReceiverFrameLengthRange(int min_frame_length_ms,
-                                   int max_frame_length_ms);
-
-  // Network
-  void RegisterTransport(Transport* transport);
-  // TODO(nisse, solenberg): Delete when VoENetwork is deleted.
-  int32_t ReceivedRTCPPacket(const uint8_t* data, size_t length);
-
-  // Muting, Volume and Level.
-  void SetInputMute(bool enable);
-
-  // Stats.
-  ANAStats GetANAStatistics() const;
-
-  // Used by AudioSendStream.
-  RtpRtcp* GetRtpRtcp() const;
-
-  // DTMF.
-  int SendTelephoneEventOutband(int event, int duration_ms);
-  int SetSendTelephoneEventPayloadType(int payload_type, int payload_frequency);
-
-  // RTP+RTCP
-  int SetLocalSSRC(unsigned int ssrc);
-
-  void SetMid(const std::string& mid, int extension_id);
-  void SetExtmapAllowMixed(bool extmap_allow_mixed);
-  int SetSendAudioLevelIndicationStatus(bool enable, unsigned char id);
-  void EnableSendTransportSequenceNumber(int id);
-
-  void RegisterSenderCongestionControlObjects(
+  virtual void SetLocalSSRC(uint32_t ssrc) = 0;
+  virtual void SetMid(const std::string& mid, int extension_id) = 0;
+  virtual void SetRTCP_CNAME(absl::string_view c_name) = 0;
+  virtual void SetExtmapAllowMixed(bool extmap_allow_mixed) = 0;
+  virtual void SetSendAudioLevelIndicationStatus(bool enable, int id) = 0;
+  virtual void EnableSendTransportSequenceNumber(int id) = 0;
+  virtual void RegisterSenderCongestionControlObjects(
       RtpTransportControllerSendInterface* transport,
-      RtcpBandwidthObserver* bandwidth_observer);
-  void ResetSenderCongestionControlObjects();
-  void SetRTCPStatus(bool enable);
-  int SetRTCP_CNAME(const char cName[256]);
-  int GetRemoteRTCPReportBlocks(std::vector<ReportBlock>* report_blocks);
-  int GetRTPStatistics(CallSendStatistics& stats);  // NOLINT
-  void SetNACKStatus(bool enable, int maxNumberOfPackets);
+      RtcpBandwidthObserver* bandwidth_observer) = 0;
+  virtual void ResetSenderCongestionControlObjects() = 0;
+  virtual std::vector<ReportBlock> GetRemoteRTCPReportBlocks() const = 0;
+  virtual ANAStats GetANAStatistics() const = 0;
+  virtual bool SetSendTelephoneEventPayloadType(int payload_type,
+                                                int payload_frequency) = 0;
+  virtual bool SendTelephoneEventOutband(int event, int duration_ms) = 0;
+  virtual void OnBitrateAllocation(BitrateAllocationUpdate update) = 0;
+  virtual int GetBitrate() const = 0;
+  virtual void SetInputMute(bool muted) = 0;
 
-  // From AudioPacketizationCallback in the ACM
-  int32_t SendData(FrameType frameType,
-                   uint8_t payloadType,
-                   uint32_t timeStamp,
-                   const uint8_t* payloadData,
-                   size_t payloadSize,
-                   const RTPFragmentationHeader* fragmentation) override;
+  virtual void ProcessAndEncodeAudio(
+      std::unique_ptr<AudioFrame> audio_frame) = 0;
+  virtual void SetTransportOverhead(size_t transport_overhead_per_packet) = 0;
+  virtual RtpRtcp* GetRtpRtcp() const = 0;
 
-  // From Transport (called by the RTP/RTCP module)
-  bool SendRtp(const uint8_t* data,
-               size_t len,
-               const PacketOptions& packet_options) override;
-  bool SendRtcp(const uint8_t* data, size_t len) override;
+  virtual void OnTwccBasedUplinkPacketLossRate(float packet_loss_rate) = 0;
+  virtual void OnRecoverableUplinkPacketLossRate(
+      float recoverable_packet_loss_rate) = 0;
+  // In RTP we currently rely on RTCP packets (|ReceivedRTCPPacket|) to inform
+  // about RTT.
+  // In media transport we rely on the TargetTransferRateObserver instead.
+  // In other words, if you are using RTP, you should expect
+  // |ReceivedRTCPPacket| to be called, if you are using media transport,
+  // |OnTargetTransferRate| will be called.
+  //
+  // In future, RTP media will move to the media transport implementation and
+  // these conditions will be removed.
+  // Returns the RTT in milliseconds.
+  virtual int64_t GetRTT() const = 0;
+  virtual void StartSend() = 0;
+  virtual void StopSend() = 0;
 
-  int PreferredSampleRate() const;
-
-  bool Sending() const { return channel_state_.Get().sending; }
-  RtpRtcp* RtpRtcpModulePtr() const { return _rtpRtcpModule.get(); }
-
-  // ProcessAndEncodeAudio() posts a task on the shared encoder task queue,
-  // which in turn calls (on the queue) ProcessAndEncodeAudioOnTaskQueue() where
-  // the actual processing of the audio takes place. The processing mainly
-  // consists of encoding and preparing the result for sending by adding it to a
-  // send queue.
-  // The main reason for using a task queue here is to release the native,
-  // OS-specific, audio capture thread as soon as possible to ensure that it
-  // can go back to sleep and be prepared to deliver an new captured audio
-  // packet.
-  void ProcessAndEncodeAudio(std::unique_ptr<AudioFrame> audio_frame);
-
-  void SetTransportOverhead(size_t transport_overhead_per_packet);
-
-  // From OverheadObserver in the RTP/RTCP module
-  void OnOverheadChanged(size_t overhead_bytes_per_packet) override;
-
-  // The existence of this function alongside OnUplinkPacketLossRate is
-  // a compromise. We want the encoder to be agnostic of the PLR source, but
-  // we also don't want it to receive conflicting information from TWCC and
-  // from RTCP-XR.
-  void OnTwccBasedUplinkPacketLossRate(float packet_loss_rate);
-
-  void OnRecoverableUplinkPacketLossRate(float recoverable_packet_loss_rate);
-
-  int64_t GetRTT() const;
-
-  // E2EE Custom Audio Frame Encryption
-  void SetFrameEncryptor(
-      rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor);
-
- private:
-  class ProcessAndEncodeAudioTask;
-
-  void Init();
-  void Terminate();
-
-  void OnUplinkPacketLossRate(float packet_loss_rate);
-  bool InputMute() const;
-
-  int ResendPackets(const uint16_t* sequence_numbers, int length);
-
-  int SetSendRtpHeaderExtension(bool enable,
-                                RTPExtensionType type,
-                                unsigned char id);
-
-  void UpdateOverheadForEncoder()
-      RTC_EXCLUSIVE_LOCKS_REQUIRED(overhead_per_packet_lock_);
-
-  int GetRtpTimestampRateHz() const;
-
-  int32_t SendRtpAudio(FrameType frameType,
-                       uint8_t payloadType,
-                       uint32_t timeStamp,
-                       rtc::ArrayView<const uint8_t> payload,
-                       const RTPFragmentationHeader* fragmentation);
-
-  int32_t SendMediaTransportAudio(FrameType frameType,
-                                  uint8_t payloadType,
-                                  uint32_t timeStamp,
-                                  rtc::ArrayView<const uint8_t> payload,
-                                  const RTPFragmentationHeader* fragmentation);
-
-  // Return media transport or nullptr if using RTP.
-  MediaTransportInterface* media_transport() { return media_transport_; }
-
-  // Called on the encoder task queue when a new input audio frame is ready
-  // for encoding.
-  void ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input);
-
-  rtc::CriticalSection _callbackCritSect;
-  rtc::CriticalSection volume_settings_critsect_;
-
-  ChannelSendState channel_state_;
-
-  RtcEventLog* const event_log_;
-
-  std::unique_ptr<RtpRtcp> _rtpRtcpModule;
-
-  std::unique_ptr<AudioCodingModule> audio_coding_;
-  uint32_t _timeStamp RTC_GUARDED_BY(encoder_queue_);
-
-  uint16_t send_sequence_number_;
-
-  // uses
-  ProcessThread* _moduleProcessThreadPtr;
-  Transport* _transportPtr;  // WebRtc socket or external transport
-  RmsLevel rms_level_ RTC_GUARDED_BY(encoder_queue_);
-  bool input_mute_ RTC_GUARDED_BY(volume_settings_critsect_);
-  bool previous_frame_muted_ RTC_GUARDED_BY(encoder_queue_);
-  // VoeRTP_RTCP
-  // TODO(henrika): can today be accessed on the main thread and on the
-  // task queue; hence potential race.
-  bool _includeAudioLevelIndication;
-  size_t transport_overhead_per_packet_
-      RTC_GUARDED_BY(overhead_per_packet_lock_);
-  size_t rtp_overhead_per_packet_ RTC_GUARDED_BY(overhead_per_packet_lock_);
-  rtc::CriticalSection overhead_per_packet_lock_;
-  // RtcpBandwidthObserver
-  std::unique_ptr<VoERtcpObserver> rtcp_observer_;
-
-  PacketRouter* packet_router_ = nullptr;
-  std::unique_ptr<TransportFeedbackProxy> feedback_observer_proxy_;
-  std::unique_ptr<TransportSequenceNumberProxy> seq_num_allocator_proxy_;
-  std::unique_ptr<RtpPacketSenderProxy> rtp_packet_sender_proxy_;
-  std::unique_ptr<RateLimiter> retransmission_rate_limiter_;
-
-  rtc::ThreadChecker construction_thread_;
-
-  const bool use_twcc_plr_for_ana_;
-
-  rtc::CriticalSection encoder_queue_lock_;
-  bool encoder_queue_is_active_ RTC_GUARDED_BY(encoder_queue_lock_) = false;
-  rtc::TaskQueue* encoder_queue_ = nullptr;
-
-  MediaTransportInterface* const media_transport_;
-  int media_transport_sequence_number_ RTC_GUARDED_BY(encoder_queue_) = 0;
-
-  rtc::CriticalSection media_transport_lock_;
-  // Currently set by SetLocalSSRC.
-  uint64_t media_transport_channel_id_ RTC_GUARDED_BY(&media_transport_lock_) =
-      0;
-  // Cache payload type and sampling frequency from most recent call to
-  // SetEncoder. Needed to set MediaTransportEncodedAudioFrame metadata, and
-  // invalidate on encoder change.
-  int media_transport_payload_type_ RTC_GUARDED_BY(&media_transport_lock_);
-  int media_transport_sampling_frequency_
-      RTC_GUARDED_BY(&media_transport_lock_);
-
-  // E2EE Audio Frame Encryption
-  rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor_;
-  // E2EE Frame Encryption Options
-  webrtc::CryptoOptions crypto_options_;
-  int configured_bitrate_bps_ = 0;
+  // E2EE Custom Audio Frame Encryption (Optional)
+  virtual void SetFrameEncryptor(
+      rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor) = 0;
 };
 
+std::unique_ptr<ChannelSendInterface> CreateChannelSend(
+    rtc::TaskQueue* encoder_queue,
+    ProcessThread* module_process_thread,
+    MediaTransportInterface* media_transport,
+    Transport* rtp_transport,
+    RtcpRttStats* rtcp_rtt_stats,
+    RtcEventLog* rtc_event_log,
+    FrameEncryptorInterface* frame_encryptor,
+    const webrtc::CryptoOptions& crypto_options,
+    bool extmap_allow_mixed,
+    int rtcp_report_interval_ms);
+
 }  // namespace voe
 }  // namespace webrtc
 
diff --git a/audio/channel_send_proxy.cc b/audio/channel_send_proxy.cc
deleted file mode 100644
index 2d0bdd3..0000000
--- a/audio/channel_send_proxy.cc
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "audio/channel_send_proxy.h"
-
-#include <utility>
-
-#include "api/crypto/frameencryptorinterface.h"
-#include "call/rtp_transport_controller_send_interface.h"
-#include "rtc_base/checks.h"
-
-namespace webrtc {
-namespace voe {
-ChannelSendProxy::ChannelSendProxy() {}
-
-ChannelSendProxy::ChannelSendProxy(std::unique_ptr<ChannelSend> channel)
-    : channel_(std::move(channel)) {
-  RTC_DCHECK(channel_);
-  module_process_thread_checker_.DetachFromThread();
-}
-
-ChannelSendProxy::~ChannelSendProxy() {}
-
-void ChannelSendProxy::SetLocalSSRC(uint32_t ssrc) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  int error = channel_->SetLocalSSRC(ssrc);
-  RTC_DCHECK_EQ(0, error);
-}
-
-void ChannelSendProxy::SetNACKStatus(bool enable, int max_packets) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetNACKStatus(enable, max_packets);
-}
-
-CallSendStatistics ChannelSendProxy::GetRTCPStatistics() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  CallSendStatistics stats = {0};
-  int error = channel_->GetRTPStatistics(stats);
-  RTC_DCHECK_EQ(0, error);
-  return stats;
-}
-
-void ChannelSendProxy::RegisterTransport(Transport* transport) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->RegisterTransport(transport);
-}
-
-bool ChannelSendProxy::ReceivedRTCPPacket(const uint8_t* packet,
-                                          size_t length) {
-  // May be called on either worker thread or network thread.
-  return channel_->ReceivedRTCPPacket(packet, length) == 0;
-}
-
-bool ChannelSendProxy::SetEncoder(int payload_type,
-                                  std::unique_ptr<AudioEncoder> encoder) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->SetEncoder(payload_type, std::move(encoder));
-}
-
-void ChannelSendProxy::ModifyEncoder(
-    rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->ModifyEncoder(modifier);
-}
-
-void ChannelSendProxy::SetRTCPStatus(bool enable) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetRTCPStatus(enable);
-}
-
-void ChannelSendProxy::SetMid(const std::string& mid, int extension_id) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetMid(mid, extension_id);
-}
-
-void ChannelSendProxy::SetRTCP_CNAME(const std::string& c_name) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  // Note: VoERTP_RTCP::SetRTCP_CNAME() accepts a char[256] array.
-  std::string c_name_limited = c_name.substr(0, 255);
-  int error = channel_->SetRTCP_CNAME(c_name_limited.c_str());
-  RTC_DCHECK_EQ(0, error);
-}
-
-void ChannelSendProxy::SetExtmapAllowMixed(bool extmap_allow_mixed) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetExtmapAllowMixed(extmap_allow_mixed);
-}
-
-void ChannelSendProxy::SetSendAudioLevelIndicationStatus(bool enable, int id) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  int error = channel_->SetSendAudioLevelIndicationStatus(enable, id);
-  RTC_DCHECK_EQ(0, error);
-}
-
-void ChannelSendProxy::EnableSendTransportSequenceNumber(int id) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->EnableSendTransportSequenceNumber(id);
-}
-
-void ChannelSendProxy::RegisterSenderCongestionControlObjects(
-    RtpTransportControllerSendInterface* transport,
-    RtcpBandwidthObserver* bandwidth_observer) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->RegisterSenderCongestionControlObjects(transport,
-                                                   bandwidth_observer);
-}
-
-void ChannelSendProxy::ResetSenderCongestionControlObjects() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->ResetSenderCongestionControlObjects();
-}
-
-std::vector<ReportBlock> ChannelSendProxy::GetRemoteRTCPReportBlocks() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  std::vector<webrtc::ReportBlock> blocks;
-  int error = channel_->GetRemoteRTCPReportBlocks(&blocks);
-  RTC_DCHECK_EQ(0, error);
-  return blocks;
-}
-
-ANAStats ChannelSendProxy::GetANAStatistics() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->GetANAStatistics();
-}
-
-bool ChannelSendProxy::SetSendTelephoneEventPayloadType(int payload_type,
-                                                        int payload_frequency) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->SetSendTelephoneEventPayloadType(payload_type,
-                                                    payload_frequency) == 0;
-}
-
-bool ChannelSendProxy::SendTelephoneEventOutband(int event, int duration_ms) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->SendTelephoneEventOutband(event, duration_ms) == 0;
-}
-
-void ChannelSendProxy::SetBitrate(int bitrate_bps,
-                                  int64_t probing_interval_ms) {
-  // This method can be called on the worker thread, module process thread
-  // or on a TaskQueue via VideoSendStreamImpl::OnEncoderConfigurationChanged.
-  // TODO(solenberg): Figure out a good way to check this or enforce calling
-  // rules.
-  // RTC_DCHECK(worker_thread_checker_.CalledOnValidThread() ||
-  //            module_process_thread_checker_.CalledOnValidThread());
-  channel_->SetBitRate(bitrate_bps, probing_interval_ms);
-}
-
-int ChannelSendProxy::GetBitrate() const {
-  return channel_->GetBitRate();
-}
-
-void ChannelSendProxy::SetInputMute(bool muted) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetInputMute(muted);
-}
-
-void ChannelSendProxy::ProcessAndEncodeAudio(
-    std::unique_ptr<AudioFrame> audio_frame) {
-  RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
-  return channel_->ProcessAndEncodeAudio(std::move(audio_frame));
-}
-
-void ChannelSendProxy::SetTransportOverhead(int transport_overhead_per_packet) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetTransportOverhead(transport_overhead_per_packet);
-}
-
-RtpRtcp* ChannelSendProxy::GetRtpRtcp() const {
-  RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
-  return channel_->GetRtpRtcp();
-}
-
-void ChannelSendProxy::OnTwccBasedUplinkPacketLossRate(float packet_loss_rate) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->OnTwccBasedUplinkPacketLossRate(packet_loss_rate);
-}
-
-void ChannelSendProxy::OnRecoverableUplinkPacketLossRate(
-    float recoverable_packet_loss_rate) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->OnRecoverableUplinkPacketLossRate(recoverable_packet_loss_rate);
-}
-
-void ChannelSendProxy::StartSend() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  int error = channel_->StartSend();
-  RTC_DCHECK_EQ(0, error);
-}
-
-void ChannelSendProxy::StopSend() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->StopSend();
-}
-
-ChannelSend* ChannelSendProxy::GetChannel() const {
-  return channel_.get();
-}
-
-void ChannelSendProxy::SetFrameEncryptor(
-    rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetFrameEncryptor(frame_encryptor);
-}
-
-}  // namespace voe
-}  // namespace webrtc
diff --git a/audio/channel_send_proxy.h b/audio/channel_send_proxy.h
deleted file mode 100644
index 3146830..0000000
--- a/audio/channel_send_proxy.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef AUDIO_CHANNEL_SEND_PROXY_H_
-#define AUDIO_CHANNEL_SEND_PROXY_H_
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "api/audio_codecs/audio_encoder.h"
-#include "audio/channel_send.h"
-#include "rtc_base/constructormagic.h"
-#include "rtc_base/race_checker.h"
-#include "rtc_base/thread_checker.h"
-
-namespace webrtc {
-
-class FrameEncryptorInterface;
-class RtcpBandwidthObserver;
-class RtpRtcp;
-class RtpTransportControllerSendInterface;
-class Transport;
-
-namespace voe {
-
-// This class provides the "view" of a voe::Channel that we need to implement
-// webrtc::AudioSendStream. It serves two purposes:
-//  1. Allow mocking just the interfaces used, instead of the entire
-//     voe::Channel class.
-//  2. Provide a refined interface for the stream classes, including assumptions
-//     on return values and input adaptation.
-class ChannelSendProxy {
- public:
-  ChannelSendProxy();
-  explicit ChannelSendProxy(std::unique_ptr<ChannelSend> channel);
-  virtual ~ChannelSendProxy();
-
-  // Shared with ChannelReceiveProxy
-  virtual void SetLocalSSRC(uint32_t ssrc);
-  virtual void SetNACKStatus(bool enable, int max_packets);
-  virtual CallSendStatistics GetRTCPStatistics() const;
-  virtual void RegisterTransport(Transport* transport);
-  virtual bool ReceivedRTCPPacket(const uint8_t* packet, size_t length);
-
-  virtual bool SetEncoder(int payload_type,
-                          std::unique_ptr<AudioEncoder> encoder);
-  virtual void ModifyEncoder(
-      rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier);
-
-  virtual void SetRTCPStatus(bool enable);
-  virtual void SetMid(const std::string& mid, int extension_id);
-  virtual void SetRTCP_CNAME(const std::string& c_name);
-  virtual void SetExtmapAllowMixed(bool extmap_allow_mixed);
-  virtual void SetSendAudioLevelIndicationStatus(bool enable, int id);
-  virtual void EnableSendTransportSequenceNumber(int id);
-  virtual void RegisterSenderCongestionControlObjects(
-      RtpTransportControllerSendInterface* transport,
-      RtcpBandwidthObserver* bandwidth_observer);
-  virtual void ResetSenderCongestionControlObjects();
-  virtual std::vector<ReportBlock> GetRemoteRTCPReportBlocks() const;
-  virtual ANAStats GetANAStatistics() const;
-  virtual bool SetSendTelephoneEventPayloadType(int payload_type,
-                                                int payload_frequency);
-  virtual bool SendTelephoneEventOutband(int event, int duration_ms);
-  virtual void SetBitrate(int bitrate_bps, int64_t probing_interval_ms);
-  virtual int GetBitrate() const;
-  virtual void SetInputMute(bool muted);
-
-  virtual void ProcessAndEncodeAudio(std::unique_ptr<AudioFrame> audio_frame);
-  virtual void SetTransportOverhead(int transport_overhead_per_packet);
-  virtual RtpRtcp* GetRtpRtcp() const;
-
-  virtual void OnTwccBasedUplinkPacketLossRate(float packet_loss_rate);
-  virtual void OnRecoverableUplinkPacketLossRate(
-      float recoverable_packet_loss_rate);
-  virtual void StartSend();
-  virtual void StopSend();
-
-  // Needed by ChannelReceiveProxy::AssociateSendChannel.
-  virtual ChannelSend* GetChannel() const;
-
-  // E2EE Custom Audio Frame Encryption (Optional)
-  virtual void SetFrameEncryptor(
-      rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor);
-
- private:
-  // Thread checkers document and lock usage of some methods on voe::Channel to
-  // specific threads we know about. The goal is to eventually split up
-  // voe::Channel into parts with single-threaded semantics, and thereby reduce
-  // the need for locks.
-  rtc::ThreadChecker worker_thread_checker_;
-  rtc::ThreadChecker module_process_thread_checker_;
-  // Methods accessed from audio and video threads are checked for sequential-
-  // only access. We don't necessarily own and control these threads, so thread
-  // checkers cannot be used. E.g. Chromium may transfer "ownership" from one
-  // audio thread to another, but access is still sequential.
-  rtc::RaceChecker audio_thread_race_checker_;
-  rtc::RaceChecker video_capture_thread_race_checker_;
-  std::unique_ptr<ChannelSend> channel_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(ChannelSendProxy);
-};
-}  // namespace voe
-}  // namespace webrtc
-
-#endif  // AUDIO_CHANNEL_SEND_PROXY_H_
diff --git a/audio/mock_voe_channel_proxy.h b/audio/mock_voe_channel_proxy.h
index 962152f..eee25c5 100644
--- a/audio/mock_voe_channel_proxy.h
+++ b/audio/mock_voe_channel_proxy.h
@@ -17,15 +17,15 @@
 #include <vector>
 
 #include "api/test/mock_frame_encryptor.h"
-#include "audio/channel_receive_proxy.h"
-#include "audio/channel_send_proxy.h"
+#include "audio/channel_receive.h"
+#include "audio/channel_send.h"
 #include "modules/rtp_rtcp/source/rtp_packet_received.h"
 #include "test/gmock.h"
 
 namespace webrtc {
 namespace test {
 
-class MockChannelReceiveProxy : public voe::ChannelReceiveProxy {
+class MockChannelReceive : public voe::ChannelReceiveInterface {
  public:
   MOCK_METHOD1(SetLocalSSRC, void(uint32_t ssrc));
   MOCK_METHOD2(SetNACKStatus, void(bool enable, int max_packets));
@@ -47,10 +47,10 @@
                AudioMixer::Source::AudioFrameInfo(int sample_rate_hz,
                                                   AudioFrame* audio_frame));
   MOCK_CONST_METHOD0(PreferredSampleRate, int());
-  MOCK_METHOD1(AssociateSendChannel,
-               void(const voe::ChannelSendProxy& send_channel_proxy));
-  MOCK_METHOD0(DisassociateSendChannel, void());
+  MOCK_METHOD1(SetAssociatedSendChannel,
+               void(const voe::ChannelSendInterface* send_channel));
   MOCK_CONST_METHOD0(GetPlayoutTimestamp, uint32_t());
+  MOCK_CONST_METHOD0(GetSyncInfo, absl::optional<Syncable::Info>());
   MOCK_METHOD1(SetMinimumPlayoutDelay, void(int delay_ms));
   MOCK_CONST_METHOD1(GetRecCodec, bool(CodecInst* codec_inst));
   MOCK_METHOD1(SetReceiveCodecs,
@@ -60,7 +60,7 @@
   MOCK_METHOD0(StopPlayout, void());
 };
 
-class MockChannelSendProxy : public voe::ChannelSendProxy {
+class MockChannelSend : public voe::ChannelSendInterface {
  public:
   // GMock doesn't like move-only types, like std::unique_ptr.
   virtual bool SetEncoder(int payload_type,
@@ -72,10 +72,9 @@
   MOCK_METHOD1(
       ModifyEncoder,
       void(rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier));
-  MOCK_METHOD1(SetRTCPStatus, void(bool enable));
+  MOCK_METHOD2(SetMid, void(const std::string& mid, int extension_id));
   MOCK_METHOD1(SetLocalSSRC, void(uint32_t ssrc));
-  MOCK_METHOD1(SetRTCP_CNAME, void(const std::string& c_name));
-  MOCK_METHOD2(SetNACKStatus, void(bool enable, int max_packets));
+  MOCK_METHOD1(SetRTCP_CNAME, void(absl::string_view c_name));
   MOCK_METHOD1(SetExtmapAllowMixed, void(bool extmap_allow_mixed));
   MOCK_METHOD2(SetSendAudioLevelIndicationStatus, void(bool enable, int id));
   MOCK_METHOD1(EnableSendTransportSequenceNumber, void(int id));
@@ -89,9 +88,8 @@
   MOCK_METHOD2(SetSendTelephoneEventPayloadType,
                bool(int payload_type, int payload_frequency));
   MOCK_METHOD2(SendTelephoneEventOutband, bool(int event, int duration_ms));
-  MOCK_METHOD2(SetBitrate, void(int bitrate_bps, int64_t probing_interval_ms));
+  MOCK_METHOD1(OnBitrateAllocation, void(BitrateAllocationUpdate update));
   MOCK_METHOD1(SetInputMute, void(bool muted));
-  MOCK_METHOD1(RegisterTransport, void(Transport* transport));
   MOCK_METHOD2(ReceivedRTCPPacket, bool(const uint8_t* packet, size_t length));
   // GMock doesn't like move-only types, like std::unique_ptr.
   virtual void ProcessAndEncodeAudio(std::unique_ptr<AudioFrame> audio_frame) {
@@ -99,12 +97,14 @@
   }
   MOCK_METHOD1(ProcessAndEncodeAudioForMock,
                void(std::unique_ptr<AudioFrame>* audio_frame));
-  MOCK_METHOD1(SetTransportOverhead, void(int transport_overhead_per_packet));
+  MOCK_METHOD1(SetTransportOverhead,
+               void(size_t transport_overhead_per_packet));
   MOCK_CONST_METHOD0(GetRtpRtcp, RtpRtcp*());
   MOCK_CONST_METHOD0(GetBitrate, int());
   MOCK_METHOD1(OnTwccBasedUplinkPacketLossRate, void(float packet_loss_rate));
   MOCK_METHOD1(OnRecoverableUplinkPacketLossRate,
                void(float recoverable_packet_loss_rate));
+  MOCK_CONST_METHOD0(GetRTT, int64_t());
   MOCK_METHOD0(StartSend, void());
   MOCK_METHOD0(StopSend, void());
   MOCK_METHOD1(
diff --git a/audio/time_interval.cc b/audio/time_interval.cc
deleted file mode 100644
index cc10340..0000000
--- a/audio/time_interval.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "audio/time_interval.h"
-#include "rtc_base/checks.h"
-#include "rtc_base/timeutils.h"
-
-namespace webrtc {
-
-TimeInterval::TimeInterval() = default;
-TimeInterval::~TimeInterval() = default;
-
-void TimeInterval::Extend() {
-  Extend(rtc::TimeMillis());
-}
-
-void TimeInterval::Extend(int64_t time) {
-  if (!interval_) {
-    interval_.emplace(time, time);
-  } else {
-    if (time < interval_->first) {
-      interval_->first = time;
-    }
-    if (time > interval_->last) {
-      interval_->last = time;
-    }
-  }
-}
-
-void TimeInterval::Extend(const TimeInterval& other_interval) {
-  if (!other_interval.Empty()) {
-    Extend(other_interval.interval_->first);
-    Extend(other_interval.interval_->last);
-  }
-}
-
-bool TimeInterval::Empty() const {
-  return !interval_;
-}
-
-int64_t TimeInterval::Length() const {
-  RTC_DCHECK(interval_);
-  return interval_->last - interval_->first;
-}
-
-TimeInterval::Interval::Interval(int64_t first, int64_t last)
-    : first(first), last(last) {}
-
-}  // namespace webrtc
diff --git a/audio/time_interval.h b/audio/time_interval.h
deleted file mode 100644
index 79fe29d..0000000
--- a/audio/time_interval.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef AUDIO_TIME_INTERVAL_H_
-#define AUDIO_TIME_INTERVAL_H_
-
-#include <stdint.h>
-
-#include "absl/types/optional.h"
-
-namespace webrtc {
-
-// This class logs the first and last time its Extend() function is called.
-//
-// This class is not thread-safe; Extend() calls should only be made by a
-// single thread at a time, such as within a lock or destructor.
-//
-// Example usage:
-//   // let x < y < z < u < v
-//   rtc::TimeInterval interval;
-//   ...  //   interval.Extend(); // at time x
-//   ...
-//   interval.Extend(); // at time y
-//   ...
-//   interval.Extend(); // at time u
-//   ...
-//   interval.Extend(z); // at time v
-//   ...
-//   if (!interval.Empty()) {
-//     int64_t active_time = interval.Length(); // returns (u - x)
-//   }
-class TimeInterval {
- public:
-  TimeInterval();
-  ~TimeInterval();
-  // Extend the interval with the current time.
-  void Extend();
-  // Extend the interval with a given time.
-  void Extend(int64_t time);
-  // Take the convex hull with another interval.
-  void Extend(const TimeInterval& other_interval);
-  // True iff Extend has never been called.
-  bool Empty() const;
-  // Returns the time between the first and the last tick, in milliseconds.
-  int64_t Length() const;
-
- private:
-  struct Interval {
-    Interval(int64_t first, int64_t last);
-
-    int64_t first, last;
-  };
-  absl::optional<Interval> interval_;
-};
-
-}  // namespace webrtc
-
-#endif  // AUDIO_TIME_INTERVAL_H_
diff --git a/audio/time_interval_unittest.cc b/audio/time_interval_unittest.cc
deleted file mode 100644
index deff6e3..0000000
--- a/audio/time_interval_unittest.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "audio/time_interval.h"
-#include "api/units/time_delta.h"
-#include "rtc_base/fakeclock.h"
-#include "test/gtest.h"
-
-namespace webrtc {
-
-TEST(TimeIntervalTest, TimeInMs) {
-  rtc::ScopedFakeClock fake_clock;
-  TimeInterval interval;
-  interval.Extend();
-  fake_clock.AdvanceTime(TimeDelta::ms(100));
-  interval.Extend();
-  EXPECT_EQ(interval.Length(), 100);
-}
-
-TEST(TimeIntervalTest, Empty) {
-  TimeInterval interval;
-  EXPECT_TRUE(interval.Empty());
-  interval.Extend();
-  EXPECT_FALSE(interval.Empty());
-  interval.Extend(200);
-  EXPECT_FALSE(interval.Empty());
-}
-
-TEST(TimeIntervalTest, MonotoneIncreasing) {
-  const size_t point_count = 7;
-  const int64_t interval_points[] = {3, 2, 5, 0, 4, 1, 6};
-  const int64_t interval_differences[] = {0, 1, 3, 5, 5, 5, 6};
-  TimeInterval interval;
-  EXPECT_TRUE(interval.Empty());
-  for (size_t i = 0; i < point_count; ++i) {
-    interval.Extend(interval_points[i]);
-    EXPECT_EQ(interval_differences[i], interval.Length());
-  }
-}
-
-}  // namespace webrtc
diff --git a/audio/utility/BUILD.gn b/audio/utility/BUILD.gn
index 76c09a5..11a65bd 100644
--- a/audio/utility/BUILD.gn
+++ b/audio/utility/BUILD.gn
@@ -21,7 +21,6 @@
   ]
 
   deps = [
-    "../..:webrtc_common",
     "../../api/audio:audio_frame_api",
     "../../rtc_base:checks",
     "../../rtc_base:rtc_base_approved",
diff --git a/common_audio/BUILD.gn b/common_audio/BUILD.gn
index 911c050..74b0c60 100644
--- a/common_audio/BUILD.gn
+++ b/common_audio/BUILD.gn
@@ -47,7 +47,6 @@
   deps = [
     ":common_audio_c",
     ":sinc_resampler",
-    "..:webrtc_common",
     "../rtc_base:checks",
     "../rtc_base:gtest_prod",
     "../rtc_base:rtc_base_approved",
@@ -182,7 +181,6 @@
   deps = [
     ":common_audio_c_arm_asm",
     ":common_audio_cc",
-    "..:webrtc_common",
     "../rtc_base:checks",
     "../rtc_base:compile_assert_c",
     "../rtc_base:rtc_base_approved",
@@ -202,7 +200,6 @@
   ]
 
   deps = [
-    "..:webrtc_common",
     "../rtc_base:rtc_base_approved",
     "../system_wrappers",
   ]
@@ -213,7 +210,6 @@
     "resampler/sinc_resampler.h",
   ]
   deps = [
-    "..:webrtc_common",
     "../rtc_base:gtest_prod",
     "../rtc_base:rtc_base_approved",
     "../rtc_base/memory:aligned_malloc",
@@ -382,7 +378,6 @@
       ":fir_filter",
       ":fir_filter_factory",
       ":sinc_resampler",
-      "..:webrtc_common",
       "../rtc_base:checks",
       "../rtc_base:rtc_base_approved",
       "../rtc_base:rtc_base_tests_utils",
diff --git a/common_types.h b/common_types.h
index b2fcf17..848b899 100644
--- a/common_types.h
+++ b/common_types.h
@@ -17,8 +17,9 @@
 #include "absl/strings/match.h"
 // TODO(sprang): Remove this include when all usage includes it directly.
 #include "api/video/video_bitrate_allocation.h"
+// TODO(bugs.webrtc.org/7660): Delete include once downstream code is updated.
+#include "api/video/video_codec_type.h"
 #include "rtc_base/checks.h"
-#include "rtc_base/deprecation.h"
 
 #if defined(_MSC_VER)
 // Disable "new behavior: elements of array will be default initialized"
@@ -38,29 +39,6 @@
   kVideoFrameDelta = 4,
 };
 
-// Statistics for an RTCP channel
-struct RtcpStatistics {
-  RtcpStatistics()
-      : fraction_lost(0),
-        packets_lost(0),
-        extended_highest_sequence_number(0),
-        jitter(0) {}
-
-  uint8_t fraction_lost;
-  int32_t packets_lost;  // Defined as a 24 bit signed integer in RTCP
-  uint32_t extended_highest_sequence_number;
-  uint32_t jitter;
-};
-
-class RtcpStatisticsCallback {
- public:
-  virtual ~RtcpStatisticsCallback() {}
-
-  virtual void StatisticsUpdated(const RtcpStatistics& statistics,
-                                 uint32_t ssrc) = 0;
-  virtual void CNameChanged(const char* cname, uint32_t ssrc) = 0;
-};
-
 // Statistics for RTCP packet types.
 struct RtcpPacketTypeCounter {
   RtcpPacketTypeCounter()
@@ -206,80 +184,6 @@
 // RTP
 enum { kRtpCsrcSize = 15 };  // RFC 3550 page 13
 
-// NETEQ statistics.
-struct NetworkStatistics {
-  // current jitter buffer size in ms
-  uint16_t currentBufferSize;
-  // preferred (optimal) buffer size in ms
-  uint16_t preferredBufferSize;
-  // adding extra delay due to "peaky jitter"
-  bool jitterPeaksFound;
-  // Stats below correspond to similarly-named fields in the WebRTC stats spec.
-  // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats
-  uint64_t totalSamplesReceived;
-  uint64_t concealedSamples;
-  uint64_t concealmentEvents;
-  uint64_t jitterBufferDelayMs;
-  // Stats below DO NOT correspond directly to anything in the WebRTC stats
-  // Loss rate (network + late); fraction between 0 and 1, scaled to Q14.
-  uint16_t currentPacketLossRate;
-  // Late loss rate; fraction between 0 and 1, scaled to Q14.
-  union {
-    RTC_DEPRECATED uint16_t currentDiscardRate;
-  };
-  // fraction (of original stream) of synthesized audio inserted through
-  // expansion (in Q14)
-  uint16_t currentExpandRate;
-  // fraction (of original stream) of synthesized speech inserted through
-  // expansion (in Q14)
-  uint16_t currentSpeechExpandRate;
-  // fraction of synthesized speech inserted through pre-emptive expansion
-  // (in Q14)
-  uint16_t currentPreemptiveRate;
-  // fraction of data removed through acceleration (in Q14)
-  uint16_t currentAccelerateRate;
-  // fraction of data coming from secondary decoding (in Q14)
-  uint16_t currentSecondaryDecodedRate;
-  // Fraction of secondary data, including FEC and RED, that is discarded (in
-  // Q14). Discarding of secondary data can be caused by the reception of the
-  // primary data, obsoleting the secondary data. It can also be caused by early
-  // or late arrival of secondary data.
-  uint16_t currentSecondaryDiscardedRate;
-  // clock-drift in parts-per-million (negative or positive)
-  int32_t clockDriftPPM;
-  // average packet waiting time in the jitter buffer (ms)
-  int meanWaitingTimeMs;
-  // median packet waiting time in the jitter buffer (ms)
-  int medianWaitingTimeMs;
-  // min packet waiting time in the jitter buffer (ms)
-  int minWaitingTimeMs;
-  // max packet waiting time in the jitter buffer (ms)
-  int maxWaitingTimeMs;
-  // added samples in off mode due to packet loss
-  size_t addedSamples;
-};
-
-// Statistics for calls to AudioCodingModule::PlayoutData10Ms().
-struct AudioDecodingCallStats {
-  AudioDecodingCallStats()
-      : calls_to_silence_generator(0),
-        calls_to_neteq(0),
-        decoded_normal(0),
-        decoded_plc(0),
-        decoded_cng(0),
-        decoded_plc_cng(0),
-        decoded_muted_output(0) {}
-
-  int calls_to_silence_generator;  // Number of calls where silence generated,
-                                   // and NetEq was disengaged from decoding.
-  int calls_to_neteq;              // Number of calls to NetEq.
-  int decoded_normal;  // Number of calls where audio RTP packet decoded.
-  int decoded_plc;     // Number of calls resulted in PLC.
-  int decoded_cng;  // Number of calls where comfort noise generated due to DTX.
-  int decoded_plc_cng;       // Number of calls resulted where PLC faded to CNG.
-  int decoded_muted_output;  // Number of calls returning a muted state output.
-};
-
 // ==================================================================
 // Video specific types
 // ==================================================================
@@ -318,18 +222,6 @@
 
 }  // namespace H264
 
-// Video codec types
-enum VideoCodecType {
-  // There are various memset(..., 0, ...) calls in the code that rely on
-  // kVideoCodecGeneric being zero.
-  kVideoCodecGeneric = 0,
-  kVideoCodecVP8,
-  kVideoCodecVP9,
-  kVideoCodecH264,
-  kVideoCodecI420,
-  kVideoCodecMultiplex,
-};
-
 struct SpatialLayer {
   bool operator==(const SpatialLayer& other) const;
   bool operator!=(const SpatialLayer& other) const { return !(*this == other); }
diff --git a/cras-config/aec_config.cc b/cras-config/aec_config.cc
index adbd453..d0cf19b 100644
--- a/cras-config/aec_config.cc
+++ b/cras-config/aec_config.cc
@@ -118,6 +118,8 @@
 		AEC_GET_FLOAT(ini, ERLE, MAX_H);
 	config->erle.onset_detection =
 		AEC_GET_INT(ini, ERLE, ONSET_DETECTION);
+	config->erle.num_sections =
+		AEC_GET_INT(ini, ERLE, NUM_SECTIONS);
 
 	config->ep_strength.lf =
 		AEC_GET_FLOAT(ini, EP_STRENGTH, LF);
diff --git a/cras-config/aec_config.h b/cras-config/aec_config.h
index 5318ac9..33185b3 100644
--- a/cras-config/aec_config.h
+++ b/cras-config/aec_config.h
@@ -120,6 +120,8 @@
 #define AEC_ERLE_MAX_H_VALUE 1.5f
 #define AEC_ERLE_ONSET_DETECTION "erle:onset_detection"
 #define AEC_ERLE_ONSET_DETECTION_VALUE 1
+#define AEC_ERLE_NUM_SECTIONS "erle:num_sections"
+#define AEC_ERLE_NUM_SECTIONS_VALUE 1
 
 // EpStrength
 #define AEC_EP_STRENGTH_LF "ep_strength:lf"
@@ -156,10 +158,10 @@
 #define AEC_ECHO_AUDIBILITY_AUDIBILITY_THRESHOLD_HF_VALUE 10
 #define AEC_ECHO_AUDIBILITY_USE_STATIONARY_PROPERTIES \
 	"echo_audibility:use_stationary_properties"
-#define AEC_ECHO_AUDIBILITY_USE_STATIONARY_PROPERTIES_VALUE 1
+#define AEC_ECHO_AUDIBILITY_USE_STATIONARY_PROPERTIES_VALUE 0
 #define AEC_ECHO_AUDIBILITY_USE_STATIONARITY_PROPERTIES_AT_INIT \
 	 "echo_audibility:use_stationarity_properties_at_init"
-#define AEC_ECHO_AUDIBILITY_USE_STATIONARITY_PROPERTIES_AT_INIT_VALUE 1
+#define AEC_ECHO_AUDIBILITY_USE_STATIONARITY_PROPERTIES_AT_INIT_VALUE 0
 
 // Rendering levels
 #define AEC_RENDER_LEVELS_ACTIVE_RENDER_LIMIT \
@@ -279,10 +281,10 @@
 
 #define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_ENR_THRESHOLD \
 	"suppressor.dominant_nearend_detection:enr_threshold"
-#define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_ENR_THRESHOLD_VALUE 4.f
+#define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_ENR_THRESHOLD_VALUE .25f
 #define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_ENR_EXIT_THRESHOLD \
 	"suppressor.dominant_nearend_detection:enr_exit_threshold"
-#define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_ENR_EXIT_THRESHOLD_VALUE .1f
+#define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_ENR_EXIT_THRESHOLD_VALUE 10.f
 #define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_SNR_THRESHOLD \
 	"suppressor.dominant_nearend_detection:snr_threshold"
 #define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_SNR_THRESHOLD_VALUE 30.f
diff --git a/cras-config/apm_config.cc b/cras-config/apm_config.cc
index b03f8d5..407025b 100644
--- a/cras-config/apm_config.cc
+++ b/cras-config/apm_config.cc
@@ -18,11 +18,14 @@
 		ini, key,	\
 		key ## _VALUE)
 
+typedef webrtc::AudioProcessing::Config ApConfig;
+
 void apm_config_apply(dictionary *ini, webrtc::AudioProcessing *apm)
 {
-	webrtc::AudioProcessing::Config config;
+	ApConfig config;
 	webrtc::GainControl::Mode agc_mode;
 	webrtc::NoiseSuppression::Level ns_level;
+	int level_estimator;
 
 	if (ini == NULL)
 		return;
@@ -37,12 +40,19 @@
 			APM_GET_FLOAT(ini, APM_PRE_AMPLIFIER_FIXED_GAIN_FACTOR);
 	config.gain_controller2.enabled =
 			APM_GET_INT(ini, APM_GAIN_CONTROLLER2_ENABLED);
-	config.gain_controller2.adaptive_digital_mode =
-		APM_GET_INT(ini, APM_GAIN_CONTROLLER2_ADAPTIVE_DIGITAL_MODE);
-	config.gain_controller2.extra_saturation_margin_db =
-		APM_GET_FLOAT(ini, APM_GAIN_CONTROLLER2_EXTRA_SATURATION_MARGIN_DB);
-	config.gain_controller2.fixed_gain_db =
-		APM_GET_FLOAT(ini, APM_GAIN_CONTROLLER2_FIXED_GAIN_DB);
+	config.gain_controller2.adaptive_digital.enabled =
+		APM_GET_INT(ini, ADAPTIVE_DIGITAL_ENABLED);
+	config.gain_controller2.adaptive_digital.extra_saturation_margin_db =
+		APM_GET_FLOAT(ini, ADAPTIVE_DIGITAL_EXTRA_SATURATION_MARGIN_DB);
+	level_estimator = APM_GET_INT(
+		ini, ADAPTIVE_DIGITAL_LEVEL_ESTIMATOR);
+	config.gain_controller2.adaptive_digital.level_estimator =
+		static_cast<ApConfig::GainController2::LevelEstimator>(
+			level_estimator);
+	config.gain_controller2.adaptive_digital.use_saturation_protector =
+		APM_GET_INT(ini, ADAPTIVE_DIGITAL_USE_SATURATION_PROTECTOR);
+	config.gain_controller2.fixed_digital.gain_db =
+		APM_GET_FLOAT(ini, APM_GAIN_CONTROLLER2_FIXED_DIGITAL_GAIN_DB);
 	apm->ApplyConfig(config);
 
 	apm->gain_control()->set_compression_gain_db(
@@ -73,12 +83,17 @@
 		APM_GET_FLOAT(ini, APM_PRE_AMPLIFIER_FIXED_GAIN_FACTOR));
 	syslog(LOG_ERR, "gain_controller2_enabled %u",
 		APM_GET_INT(ini, APM_GAIN_CONTROLLER2_ENABLED));
-	syslog(LOG_ERR, "gain_controller2_adaptive_digital_mode %d",
-		APM_GET_INT(ini, APM_GAIN_CONTROLLER2_ADAPTIVE_DIGITAL_MODE));
-	syslog(LOG_ERR, "gain_controller2_extra_saturation_margin_db %f",
-		APM_GET_FLOAT(ini, APM_GAIN_CONTROLLER2_EXTRA_SATURATION_MARGIN_DB));
-	syslog(LOG_ERR, "gain_controller2_fixed_gain_db %f",
-		APM_GET_FLOAT(ini, APM_GAIN_CONTROLLER2_FIXED_GAIN_DB));
+	syslog(LOG_ERR, "adaptive_digital_enabled %d",
+		APM_GET_INT(ini, ADAPTIVE_DIGITAL_ENABLED));
+	syslog(LOG_ERR, "adaptive_digital_extra_saturation_margin_db %f",
+		APM_GET_FLOAT(ini,
+			ADAPTIVE_DIGITAL_EXTRA_SATURATION_MARGIN_DB));
+	syslog(LOG_ERR, "adaptive_digital_level_estimator %d",
+		APM_GET_INT(ini, ADAPTIVE_DIGITAL_LEVEL_ESTIMATOR));
+	syslog(LOG_ERR, "adaptive_digital_use_saturation_protector %d",
+		APM_GET_INT(ini, ADAPTIVE_DIGITAL_USE_SATURATION_PROTECTOR));
+	syslog(LOG_ERR, "gain_controller2_fixed_digital_gain_db %f",
+		APM_GET_FLOAT(ini, APM_GAIN_CONTROLLER2_FIXED_DIGITAL_GAIN_DB));
 	syslog(LOG_ERR, "gain_control_compression_gain_db %u",
 		APM_GET_INT(ini, APM_GAIN_CONTROL_COMPRESSION_GAIN_DB));
 	syslog(LOG_ERR, "gain_control_mode %u",
diff --git a/cras-config/apm_config.h b/cras-config/apm_config.h
index a223877..8c2f823 100644
--- a/cras-config/apm_config.h
+++ b/cras-config/apm_config.h
@@ -20,13 +20,23 @@
 #define APM_PRE_AMPLIFIER_FIXED_GAIN_FACTOR_VALUE 1.f
 #define APM_GAIN_CONTROLLER2_ENABLED "apm:gain_controller2_enabled"
 #define APM_GAIN_CONTROLLER2_ENABLED_VALUE 0
-#define APM_GAIN_CONTROLLER2_FIXED_GAIN_DB "apm:gain_controller2_fixed_gain_db"
-#define APM_GAIN_CONTROLLER2_FIXED_GAIN_DB_VALUE 0.f
-#define APM_GAIN_CONTROLLER2_ADAPTIVE_DIGITAL_MODE "apm:gain_controller2_adaptive_digital_mode"
-#define APM_GAIN_CONTROLLER2_ADAPTIVE_DIGITAL_MODE_VALUE 1
-#define APM_GAIN_CONTROLLER2_EXTRA_SATURATION_MARGIN_DB \
-	"apm:gain_controller2_extra_saturation_margin_db"
-#define APM_GAIN_CONTROLLER2_EXTRA_SATURATION_MARGIN_DB_VALUE 2.f
+#define APM_GAIN_CONTROLLER2_FIXED_DIGITAL_GAIN_DB \
+	"apm:gain_controller2_fixed_digital_gain_db"
+#define APM_GAIN_CONTROLLER2_FIXED_DIGITAL_GAIN_DB_VALUE 0.f
+
+/* Keys for AudioProcessing::GainController2 */
+#define ADAPTIVE_DIGITAL_ENABLED "apm:adaptive_digital_enabled"
+#define ADAPTIVE_DIGITAL_ENABLED_VALUE 0
+#define ADAPTIVE_DIGITAL_LEVEL_ESTIMATOR \
+	"apm:adaptive_digital_level_estimator"
+#define ADAPTIVE_DIGITAL_LEVEL_ESTIMATOR_VALUE 0
+#define ADAPTIVE_DIGITAL_EXTRA_SATURATION_MARGIN_DB \
+	"apm:adaptive_digital_extra_saturation_margin_db"
+#define ADAPTIVE_DIGITAL_EXTRA_SATURATION_MARGIN_DB_VALUE 2.f
+#define ADAPTIVE_DIGITAL_USE_SATURATION_PROTECTOR \
+	"apm:adaptive_digital_use_saturation_protector"
+#define ADAPTIVE_DIGITAL_USE_SATURATION_PROTECTOR_VALUE 1
+
 #define APM_GAIN_CONTROL_COMPRESSION_GAIN_DB "apm:gain_control_compression_gain_db"
 #define APM_GAIN_CONTROL_COMPRESSION_GAIN_DB_VALUE 9
 /* 0: adaptive analog, 1: adaptive digital, 2: fixed digital */
diff --git a/modules/audio_coding/BUILD.gn b/modules/audio_coding/BUILD.gn
index ec81697..df4ba23 100644
--- a/modules/audio_coding/BUILD.gn
+++ b/modules/audio_coding/BUILD.gn
@@ -14,31 +14,6 @@
 
 visibility = [ ":*" ]
 
-audio_codec_deps = [
-  ":g711",
-  ":pcm16b",
-]
-if (rtc_include_ilbc) {
-  audio_codec_deps += [ ":ilbc" ]
-}
-if (rtc_include_opus) {
-  audio_codec_deps += [ ":webrtc_opus" ]
-}
-if (current_cpu == "arm") {
-  audio_codec_deps += [ ":isac_fix" ]
-} else {
-  audio_codec_deps += [ ":isac" ]
-}
-audio_codec_deps += [ ":g722" ]
-if (!build_with_mozilla && !build_with_chromium) {
-  audio_codec_deps += [ ":red" ]
-}
-audio_coding_deps = audio_codec_deps + [
-                      "../..:webrtc_common",
-                      "../../common_audio",
-                      "../../system_wrappers",
-                    ]
-
 rtc_static_library("audio_format_conversion") {
   visibility += webrtc_default_visibility
   sources = [
@@ -63,7 +38,6 @@
   # TODO(bugs.webrtc.org/9808): Move to private visibility as soon as that
   # client code gets updated.
   visibility += [ "*" ]
-  allow_poison = [ "audio_codecs" ]
 
   sources = [
     "acm2/acm_codec_database.cc",
@@ -72,22 +46,18 @@
     "acm2/rent_a_codec.h",
   ]
   deps = [
-           "../../rtc_base:checks",
-           "../../api:array_view",
-           "//third_party/abseil-cpp/absl/strings",
-           "//third_party/abseil-cpp/absl/types:optional",
-           "../../api/audio_codecs:audio_codecs_api",
-           "../..:webrtc_common",
-           "../../rtc_base:protobuf_utils",
-           "../../rtc_base:rtc_base_approved",
-           "../../system_wrappers",
-           ":audio_coding_module_typedefs",
-           ":isac_common",
-           ":isac_fix_c",
-           ":audio_encoder_cng",
-           ":neteq_decoder_enum",
-         ] + audio_codec_deps
-
+    ":audio_coding_module_typedefs",
+    ":neteq_decoder_enum",
+    "../..:webrtc_common",
+    "../../api:array_view",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../rtc_base:checks",
+    "../../rtc_base:protobuf_utils",
+    "../../rtc_base:rtc_base_approved",
+    "../../system_wrappers",
+    "//third_party/abseil-cpp/absl/strings",
+    "//third_party/abseil-cpp/absl/types:optional",
+  ]
   defines = audio_codec_defines
 }
 
@@ -97,12 +67,12 @@
   ]
   deps = [
     "../..:webrtc_common",
+    "../../rtc_base:deprecation",
   ]
 }
 
 rtc_static_library("audio_coding") {
   visibility += [ "*" ]
-  allow_poison = [ "audio_codecs" ]  # TODO(bugs.webrtc.org/8396): Remove.
   sources = [
     "acm2/acm_receiver.cc",
     "acm2/acm_receiver.h",
@@ -111,40 +81,34 @@
     "acm2/audio_coding_module.cc",
     "acm2/call_statistics.cc",
     "acm2/call_statistics.h",
-    "acm2/codec_manager.cc",
-    "acm2/codec_manager.h",
     "include/audio_coding_module.h",
   ]
 
   defines = []
 
-  if (rtc_include_opus) {
-    public_deps = [
-      ":webrtc_opus",
-    ]
-  }
-
-  deps = audio_coding_deps + [
-           "../../system_wrappers:metrics",
-           "../../api/audio:audio_frame_api",
-           "..:module_api",
-           "..:module_api_public",
-           "../../common_audio:common_audio_c",
-           "../../rtc_base:deprecation",
-           "../../rtc_base:checks",
-           "../../api:array_view",
-           "../../api/audio_codecs:audio_codecs_api",
-           ":audio_coding_module_typedefs",
-           ":neteq",
-           ":neteq_decoder_enum",
-           ":rent_a_codec",
-           "../../rtc_base:audio_format_to_string",
-           "../../rtc_base:rtc_base_approved",
-           "//third_party/abseil-cpp/absl/strings",
-           "//third_party/abseil-cpp/absl/types:optional",
-           "../../logging:rtc_event_log_api",
-         ]
-  defines = audio_coding_defines
+  deps = [
+    ":audio_coding_module_typedefs",
+    ":neteq",
+    ":neteq_decoder_enum",
+    ":rent_a_codec",
+    "..:module_api",
+    "..:module_api_public",
+    "../..:webrtc_common",
+    "../../api:array_view",
+    "../../api/audio:audio_frame_api",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../common_audio:common_audio",
+    "../../common_audio:common_audio_c",
+    "../../logging:rtc_event_log_api",
+    "../../rtc_base:audio_format_to_string",
+    "../../rtc_base:checks",
+    "../../rtc_base:deprecation",
+    "../../rtc_base:rtc_base_approved",
+    "../../system_wrappers",
+    "../../system_wrappers:metrics",
+    "//third_party/abseil-cpp/absl/strings",
+    "//third_party/abseil-cpp/absl/types:optional",
+  ]
 }
 
 rtc_static_library("legacy_encoded_audio_frame") {
@@ -910,7 +874,7 @@
     proto_out_dir = "modules/audio_coding/audio_network_adaptor"
   }
   proto_library("ana_config_proto") {
-    visibility += webrtc_default_visibility
+    visibility += [ "*" ]
     sources = [
       "audio_network_adaptor/config.proto",
     ]
@@ -1061,8 +1025,6 @@
     "neteq/random_vector.h",
     "neteq/red_payload_splitter.cc",
     "neteq/red_payload_splitter.h",
-    "neteq/rtcp.cc",
-    "neteq/rtcp.h",
     "neteq/statistics_calculator.cc",
     "neteq/statistics_calculator.h",
     "neteq/sync_buffer.cc",
@@ -1172,7 +1134,6 @@
     "../../rtc_base:checks",
     "../../rtc_base:rtc_base",
     "../../rtc_base:rtc_base_approved",
-    "../../rtc_base:rtc_base_tests_utils",
     "../../rtc_base/system:arch",
     "../../test:rtp_test_utils",
     "../rtp_rtcp",
@@ -1280,6 +1241,30 @@
 }
 
 if (rtc_include_tests) {
+  audio_coding_deps = [
+    "../../common_audio",
+    "../../system_wrappers",
+    "../..:webrtc_common",
+    ":audio_encoder_cng",
+    ":g711",
+    ":g722",
+    ":pcm16b",
+  ]
+  if (rtc_include_ilbc) {
+    audio_coding_deps += [ ":ilbc" ]
+  }
+  if (rtc_include_opus) {
+    audio_coding_deps += [ ":webrtc_opus" ]
+  }
+  if (current_cpu == "arm") {
+    audio_coding_deps += [ ":isac_fix" ]
+  } else {
+    audio_coding_deps += [ ":isac" ]
+  }
+  if (!build_with_mozilla && !build_with_chromium) {
+    audio_coding_deps += [ ":red" ]
+  }
+
   rtc_source_set("mocks") {
     testonly = true
     sources = [
@@ -1368,6 +1353,7 @@
       ":audio_format_conversion",
       ":pcm16b_c",
       ":red",
+      ":webrtc_opus_c",
       "..:module_api",
       "../..:webrtc_common",
       "../../api/audio:audio_frame_api",
@@ -2007,8 +1993,6 @@
       "acm2/acm_receiver_unittest.cc",
       "acm2/audio_coding_module_unittest.cc",
       "acm2/call_statistics_unittest.cc",
-      "acm2/codec_manager_unittest.cc",
-      "acm2/rent_a_codec_unittest.cc",
       "audio_network_adaptor/audio_network_adaptor_impl_unittest.cc",
       "audio_network_adaptor/bitrate_controller_unittest.cc",
       "audio_network_adaptor/channel_controller_unittest.cc",
@@ -2119,6 +2103,7 @@
       "../../logging:mocks",
       "../../logging:rtc_event_audio",
       "../../logging:rtc_event_log_api",
+      "../../modules/rtp_rtcp:rtp_rtcp_format",
       "../../rtc_base:checks",
       "../../rtc_base:protobuf_utils",
       "../../rtc_base:rtc_base",
diff --git a/modules/audio_coding/acm2/acm_codec_database.cc b/modules/audio_coding/acm2/acm_codec_database.cc
index 879082c..cada80c 100644
--- a/modules/audio_coding/acm2/acm_codec_database.cc
+++ b/modules/audio_coding/acm2/acm_codec_database.cc
@@ -298,7 +298,7 @@
 int ACMCodecDB::CodecId(const char* payload_name,
                         int frequency,
                         size_t channels) {
-  for (const CodecInst& ci : RentACodec::Database()) {
+  for (const CodecInst& ci : database_) {
     bool name_match = false;
     bool frequency_match = false;
     bool channels_match = false;
@@ -318,7 +318,7 @@
 
     if (name_match && frequency_match && channels_match) {
       // We have found a matching codec in the list.
-      return &ci - RentACodec::Database().data();
+      return &ci - database_;
     }
   }
 
diff --git a/modules/audio_coding/acm2/acm_receiver.cc b/modules/audio_coding/acm2/acm_receiver.cc
index 3411d90..d3af7c0 100644
--- a/modules/audio_coding/acm2/acm_receiver.cc
+++ b/modules/audio_coding/acm2/acm_receiver.cc
@@ -18,7 +18,6 @@
 #include "absl/strings/match.h"
 #include "api/audio/audio_frame.h"
 #include "api/audio_codecs/audio_decoder.h"
-#include "common_types.h"
 #include "modules/audio_coding/acm2/acm_resampler.h"
 #include "modules/audio_coding/acm2/call_statistics.h"
 #include "modules/audio_coding/acm2/rent_a_codec.h"
@@ -346,6 +345,13 @@
   acm_stat->concealedSamples = neteq_lifetime_stat.concealed_samples;
   acm_stat->concealmentEvents = neteq_lifetime_stat.concealment_events;
   acm_stat->jitterBufferDelayMs = neteq_lifetime_stat.jitter_buffer_delay_ms;
+  acm_stat->delayedPacketOutageSamples =
+      neteq_lifetime_stat.delayed_packet_outage_samples;
+
+  NetEqOperationsAndState neteq_operations_and_state =
+      neteq_->GetOperationsAndState();
+  acm_stat->packetBufferFlushes =
+      neteq_operations_and_state.packet_buffer_flushes;
 }
 
 int AcmReceiver::DecoderByPayloadType(uint8_t payload_type,
diff --git a/modules/audio_coding/acm2/audio_coding_module.cc b/modules/audio_coding/acm2/audio_coding_module.cc
index 334c0e0..c0aab3a 100644
--- a/modules/audio_coding/acm2/audio_coding_module.cc
+++ b/modules/audio_coding/acm2/audio_coding_module.cc
@@ -18,7 +18,6 @@
 #include "api/array_view.h"
 #include "modules/audio_coding/acm2/acm_receiver.h"
 #include "modules/audio_coding/acm2/acm_resampler.h"
-#include "modules/audio_coding/acm2/codec_manager.h"
 #include "modules/audio_coding/acm2/rent_a_codec.h"
 #include "modules/include/module_common_types.h"
 #include "modules/include/module_common_types_public.h"
@@ -34,12 +33,6 @@
 
 namespace {
 
-struct EncoderFactory {
-  AudioEncoder* external_speech_encoder = nullptr;
-  acm2::CodecManager codec_manager;
-  acm2::RentACodec rent_a_codec;
-};
-
 class AudioCodingModuleImpl final : public AudioCodingModule {
  public:
   explicit AudioCodingModuleImpl(const AudioCodingModule::Config& config);
@@ -49,12 +42,6 @@
   //   Sender
   //
 
-  // Can be called multiple times for Codec, CNG, RED.
-  int RegisterSendCodec(const CodecInst& send_codec) override;
-
-  void RegisterExternalSendCodec(
-      AudioEncoder* external_speech_encoder) override;
-
   void ModifyEncoder(rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)>
                          modifier) override;
 
@@ -74,25 +61,9 @@
   int Add10MsData(const AudioFrame& audio_frame) override;
 
   /////////////////////////////////////////
-  // (RED) Redundant Coding
-  //
-
-  // Configure RED status i.e. on/off.
-  int SetREDStatus(bool enable_red) override;
-
-  // Get RED status.
-  bool REDStatus() const override;
-
-  /////////////////////////////////////////
   // (FEC) Forward Error Correction (codec internal)
   //
 
-  // Configure FEC status i.e. on/off.
-  int SetCodecFEC(bool enabled_codec_fec) override;
-
-  // Get FEC status.
-  bool CodecFEC() const override;
-
   // Set target packet loss rate
   int SetPacketLossRate(int loss_rate) override;
 
@@ -102,14 +73,6 @@
   //   (CNG) Comfort Noise Generation
   //
 
-  int SetVAD(bool enable_dtx = true,
-             bool enable_vad = false,
-             ACMVADMode mode = VADNormal) override;
-
-  int VAD(bool* dtx_enabled,
-          bool* vad_enabled,
-          ACMVADMode* mode) const override;
-
   int RegisterVADCallback(ACMVADCallback* vad_callback) override;
 
   /////////////////////////////////////////
@@ -130,11 +93,6 @@
   bool RegisterReceiveCodec(int rtp_payload_type,
                             const SdpAudioFormat& audio_format) override;
 
-  int RegisterReceiveCodec(const CodecInst& receive_codec) override;
-  int RegisterReceiveCodec(
-      const CodecInst& receive_codec,
-      rtc::FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory) override;
-
   int RegisterExternalReceiveCodec(int rtp_payload_type,
                                    AudioDecoder* external_decoder,
                                    int sample_rate_hz,
@@ -222,11 +180,6 @@
     const std::string histogram_name_;
   };
 
-  int RegisterReceiveCodecUnlocked(
-      const CodecInst& codec,
-      rtc::FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory)
-      RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
-
   int Add10MsDataInternal(const AudioFrame& audio_frame, InputData* input_data)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
   int Encode(const InputData& input_data)
@@ -264,12 +217,7 @@
   acm2::AcmReceiver receiver_;  // AcmReceiver has it's own internal lock.
   ChangeLogger bitrate_logger_ RTC_GUARDED_BY(acm_crit_sect_);
 
-  std::unique_ptr<EncoderFactory> encoder_factory_
-      RTC_GUARDED_BY(acm_crit_sect_);
-
-  // Current encoder stack, either obtained from
-  // encoder_factory_->rent_a_codec.RentEncoderStack or provided by a call to
-  // RegisterEncoder.
+  // Current encoder stack, provided by a call to RegisterEncoder.
   std::unique_ptr<AudioEncoder> encoder_stack_ RTC_GUARDED_BY(acm_crit_sect_);
 
   std::unique_ptr<AudioDecoder> isac_decoder_16k_
@@ -405,28 +353,6 @@
   AudioEncoder* enc_;
 };
 
-// Return false on error.
-bool CreateSpeechEncoderIfNecessary(EncoderFactory* ef) {
-  auto* sp = ef->codec_manager.GetStackParams();
-  if (sp->speech_encoder) {
-    // Do nothing; we already have a speech encoder.
-  } else if (ef->codec_manager.GetCodecInst()) {
-    RTC_DCHECK(!ef->external_speech_encoder);
-    // We have no speech encoder, but we have a specification for making one.
-    std::unique_ptr<AudioEncoder> enc =
-        ef->rent_a_codec.RentEncoder(*ef->codec_manager.GetCodecInst());
-    if (!enc)
-      return false;  // Encoder spec was bad.
-    sp->speech_encoder = std::move(enc);
-  } else if (ef->external_speech_encoder) {
-    RTC_DCHECK(!ef->codec_manager.GetCodecInst());
-    // We have an external speech encoder.
-    sp->speech_encoder = std::unique_ptr<AudioEncoder>(
-        new RawAudioEncoderWrapper(ef->external_speech_encoder));
-  }
-  return true;
-}
-
 void AudioCodingModuleImpl::ChangeLogger::MaybeLog(int value) {
   if (value != last_value_ || first_time_) {
     first_time_ = false;
@@ -441,7 +367,6 @@
       expected_in_ts_(0xD87F3F9F),
       receiver_(config),
       bitrate_logger_("WebRTC.Audio.TargetBitrateInKbps"),
-      encoder_factory_(new EncoderFactory),
       encoder_stack_(nullptr),
       previous_pltype_(255),
       receiver_initialized_(false),
@@ -549,69 +474,29 @@
 //   Sender
 //
 
-// Can be called multiple times for Codec, CNG, RED.
-int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
-  rtc::CritScope lock(&acm_crit_sect_);
-  if (!encoder_factory_->codec_manager.RegisterEncoder(send_codec)) {
-    return -1;
-  }
-  if (encoder_factory_->codec_manager.GetCodecInst()) {
-    encoder_factory_->external_speech_encoder = nullptr;
-  }
-  if (!CreateSpeechEncoderIfNecessary(encoder_factory_.get())) {
-    return -1;
-  }
-  auto* sp = encoder_factory_->codec_manager.GetStackParams();
-  if (sp->speech_encoder)
-    encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
-  return 0;
-}
-
-void AudioCodingModuleImpl::RegisterExternalSendCodec(
-    AudioEncoder* external_speech_encoder) {
-  rtc::CritScope lock(&acm_crit_sect_);
-  encoder_factory_->codec_manager.UnsetCodecInst();
-  encoder_factory_->external_speech_encoder = external_speech_encoder;
-  RTC_CHECK(CreateSpeechEncoderIfNecessary(encoder_factory_.get()));
-  auto* sp = encoder_factory_->codec_manager.GetStackParams();
-  RTC_CHECK(sp->speech_encoder);
-  encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
-}
-
 void AudioCodingModuleImpl::ModifyEncoder(
     rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) {
   rtc::CritScope lock(&acm_crit_sect_);
-
-  // Wipe the encoder factory, so that everything that relies on it will fail.
-  // We don't want the complexity of supporting swapping back and forth.
-  if (encoder_factory_) {
-    encoder_factory_.reset();
-    RTC_CHECK(!encoder_stack_);  // Ensure we hadn't started using the factory.
-  }
-
   modifier(&encoder_stack_);
 }
 
 // Get current send codec.
 absl::optional<CodecInst> AudioCodingModuleImpl::SendCodec() const {
   rtc::CritScope lock(&acm_crit_sect_);
-  if (encoder_factory_) {
-    auto* ci = encoder_factory_->codec_manager.GetCodecInst();
-    if (ci) {
-      return *ci;
-    }
-    CreateSpeechEncoderIfNecessary(encoder_factory_.get());
-    const std::unique_ptr<AudioEncoder>& enc =
-        encoder_factory_->codec_manager.GetStackParams()->speech_encoder;
-    if (enc) {
-      return acm2::CodecManager::ForgeCodecInst(enc.get());
-    }
-    return absl::nullopt;
+  if (encoder_stack_) {
+    CodecInst ci;
+    ci.channels = encoder_stack_->NumChannels();
+    ci.plfreq = encoder_stack_->SampleRateHz();
+    ci.pacsize = rtc::CheckedDivExact(
+        static_cast<int>(encoder_stack_->Max10MsFramesInAPacket() * ci.plfreq),
+        100);
+    ci.pltype = -1;  // Not valid.
+    ci.rate = -1;    // Not valid.
+    static const char kName[] = "external";
+    memcpy(ci.plname, kName, sizeof(kName));
+    return ci;
   } else {
-    return encoder_stack_
-               ? absl::optional<CodecInst>(
-                     acm2::CodecManager::ForgeCodecInst(encoder_stack_.get()))
-               : absl::nullopt;
+    return absl::nullopt;
   }
 }
 
@@ -809,58 +694,9 @@
 }
 
 /////////////////////////////////////////
-//   (RED) Redundant Coding
-//
-
-bool AudioCodingModuleImpl::REDStatus() const {
-  rtc::CritScope lock(&acm_crit_sect_);
-  return encoder_factory_->codec_manager.GetStackParams()->use_red;
-}
-
-// Configure RED status i.e on/off.
-int AudioCodingModuleImpl::SetREDStatus(bool enable_red) {
-#ifdef WEBRTC_CODEC_RED
-  rtc::CritScope lock(&acm_crit_sect_);
-  CreateSpeechEncoderIfNecessary(encoder_factory_.get());
-  if (!encoder_factory_->codec_manager.SetCopyRed(enable_red)) {
-    return -1;
-  }
-  auto* sp = encoder_factory_->codec_manager.GetStackParams();
-  if (sp->speech_encoder)
-    encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
-  return 0;
-#else
-  RTC_LOG(LS_WARNING) << "  WEBRTC_CODEC_RED is undefined";
-  return -1;
-#endif
-}
-
-/////////////////////////////////////////
 //   (FEC) Forward Error Correction (codec internal)
 //
 
-bool AudioCodingModuleImpl::CodecFEC() const {
-  rtc::CritScope lock(&acm_crit_sect_);
-  return encoder_factory_->codec_manager.GetStackParams()->use_codec_fec;
-}
-
-int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) {
-  rtc::CritScope lock(&acm_crit_sect_);
-  CreateSpeechEncoderIfNecessary(encoder_factory_.get());
-  if (!encoder_factory_->codec_manager.SetCodecFEC(enable_codec_fec)) {
-    return -1;
-  }
-  auto* sp = encoder_factory_->codec_manager.GetStackParams();
-  if (sp->speech_encoder)
-    encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
-  if (enable_codec_fec) {
-    return sp->use_codec_fec ? 0 : -1;
-  } else {
-    RTC_DCHECK(!sp->use_codec_fec);
-    return 0;
-  }
-}
-
 int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
   rtc::CritScope lock(&acm_crit_sect_);
   if (HaveValidEncoder("SetPacketLossRate")) {
@@ -870,36 +706,6 @@
 }
 
 /////////////////////////////////////////
-//   (VAD) Voice Activity Detection
-//
-int AudioCodingModuleImpl::SetVAD(bool enable_dtx,
-                                  bool enable_vad,
-                                  ACMVADMode mode) {
-  // Note: |enable_vad| is not used; VAD is enabled based on the DTX setting.
-  RTC_DCHECK_EQ(enable_dtx, enable_vad);
-  rtc::CritScope lock(&acm_crit_sect_);
-  CreateSpeechEncoderIfNecessary(encoder_factory_.get());
-  if (!encoder_factory_->codec_manager.SetVAD(enable_dtx, mode)) {
-    return -1;
-  }
-  auto* sp = encoder_factory_->codec_manager.GetStackParams();
-  if (sp->speech_encoder)
-    encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
-  return 0;
-}
-
-// Get VAD/DTX settings.
-int AudioCodingModuleImpl::VAD(bool* dtx_enabled,
-                               bool* vad_enabled,
-                               ACMVADMode* mode) const {
-  rtc::CritScope lock(&acm_crit_sect_);
-  const auto* sp = encoder_factory_->codec_manager.GetStackParams();
-  *dtx_enabled = *vad_enabled = sp->use_cng;
-  *mode = sp->vad_mode;
-  return 0;
-}
-
-/////////////////////////////////////////
 //   Receiver
 //
 
@@ -957,59 +763,6 @@
   return receiver_.AddCodec(rtp_payload_type, audio_format);
 }
 
-int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
-  rtc::CritScope lock(&acm_crit_sect_);
-  auto* ef = encoder_factory_.get();
-  return RegisterReceiveCodecUnlocked(
-      codec, [&] { return ef->rent_a_codec.RentIsacDecoder(codec.plfreq); });
-}
-
-int AudioCodingModuleImpl::RegisterReceiveCodec(
-    const CodecInst& codec,
-    rtc::FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory) {
-  rtc::CritScope lock(&acm_crit_sect_);
-  return RegisterReceiveCodecUnlocked(codec, isac_factory);
-}
-
-int AudioCodingModuleImpl::RegisterReceiveCodecUnlocked(
-    const CodecInst& codec,
-    rtc::FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory) {
-  RTC_DCHECK(receiver_initialized_);
-  if (codec.channels > 2) {
-    RTC_LOG_F(LS_ERROR) << "Unsupported number of channels: " << codec.channels;
-    return -1;
-  }
-
-  auto codec_id = acm2::RentACodec::CodecIdByParams(codec.plname, codec.plfreq,
-                                                    codec.channels);
-  if (!codec_id) {
-    RTC_LOG_F(LS_ERROR)
-        << "Wrong codec params to be registered as receive codec";
-    return -1;
-  }
-  auto codec_index = acm2::RentACodec::CodecIndexFromId(*codec_id);
-  RTC_CHECK(codec_index) << "Invalid codec ID: " << static_cast<int>(*codec_id);
-
-  // Check if the payload-type is valid.
-  if (!acm2::RentACodec::IsPayloadTypeValid(codec.pltype)) {
-    RTC_LOG_F(LS_ERROR) << "Invalid payload type " << codec.pltype << " for "
-                        << codec.plname;
-    return -1;
-  }
-
-  AudioDecoder* isac_decoder = nullptr;
-  if (absl::EqualsIgnoreCase(codec.plname, "isac")) {
-    std::unique_ptr<AudioDecoder>& saved_isac_decoder =
-        codec.plfreq == 16000 ? isac_decoder_16k_ : isac_decoder_32k_;
-    if (!saved_isac_decoder) {
-      saved_isac_decoder = isac_factory();
-    }
-    isac_decoder = saved_isac_decoder.get();
-  }
-  return receiver_.AddCodec(*codec_index, codec.pltype, codec.channels,
-                            codec.plfreq, isac_decoder, codec.plname);
-}
-
 int AudioCodingModuleImpl::RegisterExternalReceiveCodec(
     int rtp_payload_type,
     AudioDecoder* external_decoder,
diff --git a/modules/audio_coding/acm2/audio_coding_module_unittest.cc b/modules/audio_coding/acm2/audio_coding_module_unittest.cc
index b227cfb..4e262f7 100644
--- a/modules/audio_coding/acm2/audio_coding_module_unittest.cc
+++ b/modules/audio_coding/acm2/audio_coding_module_unittest.cc
@@ -1183,14 +1183,14 @@
 
     // Extract and verify the audio checksum.
     std::string checksum_string = audio_checksum.Finish();
-    EXPECT_EQ(audio_checksum_ref, checksum_string);
+    ExpectChecksumEq(audio_checksum_ref, checksum_string);
 
     // Extract and verify the payload checksum.
     rtc::Buffer checksum_result(payload_checksum_->Size());
     payload_checksum_->Finish(checksum_result.data(), checksum_result.size());
     checksum_string =
         rtc::hex_encode(checksum_result.data<char>(), checksum_result.size());
-    EXPECT_EQ(payload_checksum_ref, checksum_string);
+    ExpectChecksumEq(payload_checksum_ref, checksum_string);
 
     // Verify number of packets produced.
     EXPECT_EQ(expected_packets, packet_count_);
@@ -1199,6 +1199,18 @@
     remove(output_file_name.c_str());
   }
 
+  // Helper: result must be one the "|"-separated checksums.
+  void ExpectChecksumEq(std::string ref, std::string result) {
+    if (ref.size() == result.size()) {
+      // Only one checksum: clearer message.
+      EXPECT_EQ(ref, result);
+    } else {
+      EXPECT_NE(ref.find(result), std::string::npos)
+          << result << " must be one of these:\n"
+          << ref;
+    }
+  }
+
   // Inherited from test::PacketSource.
   std::unique_ptr<test::Packet> NextPacket() override {
     auto packet = send_test_->NextPacket();
@@ -1436,21 +1448,35 @@
       50, test::AcmReceiveTestOldApi::kStereoOutput);
 }
 
+namespace {
+// Checksum depends on libopus being compiled with or without SSE.
+const std::string audio_maybe_sse =
+    "3e285b74510e62062fbd8142dacd16e9|"
+    "fd5d57d6d766908e6a7211e2a5c7f78a";
+const std::string payload_maybe_sse =
+    "78cf8f03157358acdc69f6835caa0d9b|"
+    "b693bd95c2ee2354f92340dd09e9da68";
+// Common checksums.
+const std::string audio_checksum =
+    AcmReceiverBitExactnessOldApi::PlatformChecksum(
+        audio_maybe_sse,
+        audio_maybe_sse,
+        "439e97ad1932c49923b5da029c17dd5e",
+        "038ec90f5f3fc2320f3090f8ecef6bb7",
+        "038ec90f5f3fc2320f3090f8ecef6bb7");
+const std::string payload_checksum =
+    AcmReceiverBitExactnessOldApi::PlatformChecksum(
+        payload_maybe_sse,
+        payload_maybe_sse,
+        "ab88b1a049c36bdfeb7e8b057ef6982a",
+        "27fef7b799393347ec3b5694369a1c36",
+        "27fef7b799393347ec3b5694369a1c36");
+}  // namespace
+
 TEST_F(AcmSenderBitExactnessOldApi, Opus_stereo_20ms) {
   ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 2, 120, 960, 960));
-  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
-          "3e285b74510e62062fbd8142dacd16e9",
-          "3e285b74510e62062fbd8142dacd16e9",
-          "439e97ad1932c49923b5da029c17dd5e",
-          "038ec90f5f3fc2320f3090f8ecef6bb7",
-          "038ec90f5f3fc2320f3090f8ecef6bb7"),
-      AcmReceiverBitExactnessOldApi::PlatformChecksum(
-          "78cf8f03157358acdc69f6835caa0d9b",
-          "78cf8f03157358acdc69f6835caa0d9b",
-          "ab88b1a049c36bdfeb7e8b057ef6982a",
-          "27fef7b799393347ec3b5694369a1c36",
-          "27fef7b799393347ec3b5694369a1c36"),
-      50, test::AcmReceiveTestOldApi::kStereoOutput);
+  Run(audio_checksum, payload_checksum, 50,
+      test::AcmReceiveTestOldApi::kStereoOutput);
 }
 
 TEST_F(AcmSenderBitExactnessNewApi, MAYBE_OpusFromFormat_stereo_20ms) {
@@ -1458,19 +1484,8 @@
       SdpAudioFormat("opus", 48000, 2, {{"stereo", "1"}}));
   ASSERT_NO_FATAL_FAILURE(SetUpTestExternalEncoder(
       AudioEncoderOpus::MakeAudioEncoder(*config, 120), 120));
-  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
-          "3e285b74510e62062fbd8142dacd16e9",
-          "3e285b74510e62062fbd8142dacd16e9",
-          "439e97ad1932c49923b5da029c17dd5e",
-          "038ec90f5f3fc2320f3090f8ecef6bb7",
-          "038ec90f5f3fc2320f3090f8ecef6bb7"),
-      AcmReceiverBitExactnessOldApi::PlatformChecksum(
-          "78cf8f03157358acdc69f6835caa0d9b",
-          "78cf8f03157358acdc69f6835caa0d9b",
-          "ab88b1a049c36bdfeb7e8b057ef6982a",
-          "27fef7b799393347ec3b5694369a1c36",
-          "27fef7b799393347ec3b5694369a1c36"),
-      50, test::AcmReceiveTestOldApi::kStereoOutput);
+  Run(audio_checksum, payload_checksum, 50,
+      test::AcmReceiveTestOldApi::kStereoOutput);
 }
 
 TEST_F(AcmSenderBitExactnessNewApi, OpusFromFormat_stereo_20ms_voip) {
@@ -1480,15 +1495,19 @@
   config->application = AudioEncoderOpusConfig::ApplicationMode::kVoip;
   ASSERT_NO_FATAL_FAILURE(SetUpTestExternalEncoder(
       AudioEncoderOpus::MakeAudioEncoder(*config, 120), 120));
+  // Checksum depends on libopus being compiled with or without SSE.
+  const std::string audio_maybe_sse =
+      "b0325df4e8104f04e03af23c0b75800e|"
+      "3cd4e1bc2acd9440bb9e97af34080ffc";
+  const std::string payload_maybe_sse =
+      "4eab2259b6fe24c22dd242a113e0b3d9|"
+      "4fc0af0aa06c26454af09832d3ec1b4e";
   Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
-          "b0325df4e8104f04e03af23c0b75800e",
-          "b0325df4e8104f04e03af23c0b75800e",
-          "1c81121f5d9286a5a865d01dbab22ce8",
+          audio_maybe_sse, audio_maybe_sse, "1c81121f5d9286a5a865d01dbab22ce8",
           "11d547f89142e9ef03f37d7ca7f32379",
           "11d547f89142e9ef03f37d7ca7f32379"),
       AcmReceiverBitExactnessOldApi::PlatformChecksum(
-          "4eab2259b6fe24c22dd242a113e0b3d9",
-          "4eab2259b6fe24c22dd242a113e0b3d9",
+          payload_maybe_sse, payload_maybe_sse,
           "839ea60399447268ee0f0262a50b75fd",
           "1815fd5589cad0c6f6cf946c76b81aeb",
           "1815fd5589cad0c6f6cf946c76b81aeb"),
diff --git a/modules/audio_coding/acm2/call_statistics.h b/modules/audio_coding/acm2/call_statistics.h
index 9dced64..5d94ac4 100644
--- a/modules/audio_coding/acm2/call_statistics.h
+++ b/modules/audio_coding/acm2/call_statistics.h
@@ -12,7 +12,7 @@
 #define MODULES_AUDIO_CODING_ACM2_CALL_STATISTICS_H_
 
 #include "api/audio/audio_frame.h"
-#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
 
 //
 // This class is for book keeping of calls to ACM. It is not useful to log API
diff --git a/modules/audio_coding/acm2/codec_manager.cc b/modules/audio_coding/acm2/codec_manager.cc
deleted file mode 100644
index eda6555..0000000
--- a/modules/audio_coding/acm2/codec_manager.cc
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "modules/audio_coding/acm2/codec_manager.h"
-
-#include <string.h>
-#include <map>
-#include <memory>
-#include <utility>
-
-#include "absl/strings/match.h"
-#include "api/array_view.h"
-#include "api/audio_codecs/audio_encoder.h"
-#include "modules/audio_coding/acm2/rent_a_codec.h"
-#include "rtc_base/checks.h"
-#include "rtc_base/logging.h"
-
-namespace webrtc {
-namespace acm2 {
-
-namespace {
-
-// Check if the given codec is a valid to be registered as send codec.
-int IsValidSendCodec(const CodecInst& send_codec) {
-  if ((send_codec.channels != 1) && (send_codec.channels != 2)) {
-    RTC_LOG(LS_ERROR) << "Wrong number of channels (" << send_codec.channels
-                      << "), only mono and stereo are supported)";
-    return -1;
-  }
-
-  auto maybe_codec_id = RentACodec::CodecIdByInst(send_codec);
-  if (!maybe_codec_id) {
-    RTC_LOG(LS_ERROR) << "Invalid codec setting for the send codec.";
-    return -1;
-  }
-
-  // Telephone-event cannot be a send codec.
-  if (absl::EqualsIgnoreCase(send_codec.plname, "telephone-event")) {
-    RTC_LOG(LS_ERROR) << "telephone-event cannot be a send codec";
-    return -1;
-  }
-
-  if (!RentACodec::IsSupportedNumChannels(*maybe_codec_id, send_codec.channels)
-           .value_or(false)) {
-    RTC_LOG(LS_ERROR) << send_codec.channels
-                      << " number of channels not supported for "
-                      << send_codec.plname << ".";
-    return -1;
-  }
-  return RentACodec::CodecIndexFromId(*maybe_codec_id).value_or(-1);
-}
-
-bool IsOpus(const CodecInst& codec) {
-  return
-#ifdef WEBRTC_CODEC_OPUS
-      absl::EqualsIgnoreCase(codec.plname, "opus") ||
-#endif
-      false;
-}
-
-}  // namespace
-
-CodecManager::CodecManager() {
-  thread_checker_.DetachFromThread();
-}
-
-CodecManager::~CodecManager() = default;
-
-bool CodecManager::RegisterEncoder(const CodecInst& send_codec) {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  int codec_id = IsValidSendCodec(send_codec);
-
-  // Check for reported errors from function IsValidSendCodec().
-  if (codec_id < 0) {
-    return false;
-  }
-
-  switch (RentACodec::RegisterRedPayloadType(
-      &codec_stack_params_.red_payload_types, send_codec)) {
-    case RentACodec::RegistrationResult::kOk:
-      return true;
-    case RentACodec::RegistrationResult::kBadFreq:
-      RTC_LOG(LS_ERROR)
-          << "RegisterSendCodec() failed, invalid frequency for RED"
-             " registration";
-      return false;
-    case RentACodec::RegistrationResult::kSkip:
-      break;
-  }
-  switch (RentACodec::RegisterCngPayloadType(
-      &codec_stack_params_.cng_payload_types, send_codec)) {
-    case RentACodec::RegistrationResult::kOk:
-      return true;
-    case RentACodec::RegistrationResult::kBadFreq:
-      RTC_LOG(LS_ERROR)
-          << "RegisterSendCodec() failed, invalid frequency for CNG"
-             " registration";
-      return false;
-    case RentACodec::RegistrationResult::kSkip:
-      break;
-  }
-
-  if (IsOpus(send_codec)) {
-    // VAD/DTX not supported.
-    codec_stack_params_.use_cng = false;
-  }
-
-  send_codec_inst_ = send_codec;
-  recreate_encoder_ = true;  // Caller must recreate it.
-  return true;
-}
-
-CodecInst CodecManager::ForgeCodecInst(
-    const AudioEncoder* external_speech_encoder) {
-  CodecInst ci;
-  ci.channels = external_speech_encoder->NumChannels();
-  ci.plfreq = external_speech_encoder->SampleRateHz();
-  ci.pacsize = rtc::CheckedDivExact(
-      static_cast<int>(external_speech_encoder->Max10MsFramesInAPacket() *
-                       ci.plfreq),
-      100);
-  ci.pltype = -1;  // Not valid.
-  ci.rate = -1;    // Not valid.
-  static const char kName[] = "external";
-  memcpy(ci.plname, kName, sizeof(kName));
-  return ci;
-}
-
-bool CodecManager::SetCopyRed(bool enable) {
-  if (enable && codec_stack_params_.use_codec_fec) {
-    RTC_LOG(LS_WARNING) << "Codec internal FEC and RED cannot be co-enabled.";
-    return false;
-  }
-  if (enable && send_codec_inst_ &&
-      codec_stack_params_.red_payload_types.count(send_codec_inst_->plfreq) <
-          1) {
-    RTC_LOG(LS_WARNING) << "Cannot enable RED at " << send_codec_inst_->plfreq
-                        << " Hz.";
-    return false;
-  }
-  codec_stack_params_.use_red = enable;
-  return true;
-}
-
-bool CodecManager::SetVAD(bool enable, ACMVADMode mode) {
-  // Sanity check of the mode.
-  RTC_DCHECK(mode == VADNormal || mode == VADLowBitrate || mode == VADAggr ||
-             mode == VADVeryAggr);
-
-  // Check that the send codec is mono. We don't support VAD/DTX for stereo
-  // sending.
-  const bool stereo_send =
-      codec_stack_params_.speech_encoder
-          ? (codec_stack_params_.speech_encoder->NumChannels() != 1)
-          : false;
-  if (enable && stereo_send) {
-    RTC_LOG(LS_ERROR) << "VAD/DTX not supported for stereo sending";
-    return false;
-  }
-
-  // TODO(kwiberg): This doesn't protect Opus when injected as an external
-  // encoder.
-  if (send_codec_inst_ && IsOpus(*send_codec_inst_)) {
-    // VAD/DTX not supported, but don't fail.
-    enable = false;
-  }
-
-  codec_stack_params_.use_cng = enable;
-  codec_stack_params_.vad_mode = mode;
-  return true;
-}
-
-bool CodecManager::SetCodecFEC(bool enable_codec_fec) {
-  if (enable_codec_fec && codec_stack_params_.use_red) {
-    RTC_LOG(LS_WARNING) << "Codec internal FEC and RED cannot be co-enabled.";
-    return false;
-  }
-
-  codec_stack_params_.use_codec_fec = enable_codec_fec;
-  return true;
-}
-
-bool CodecManager::MakeEncoder(RentACodec* rac, AudioCodingModule* acm) {
-  RTC_DCHECK(rac);
-  RTC_DCHECK(acm);
-
-  if (!recreate_encoder_) {
-    bool error = false;
-    // Try to re-use the speech encoder we've given to the ACM.
-    acm->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
-      if (!*encoder) {
-        // There is no existing encoder.
-        recreate_encoder_ = true;
-        return;
-      }
-
-      // Extract the speech encoder from the ACM.
-      std::unique_ptr<AudioEncoder> enc = std::move(*encoder);
-      while (true) {
-        auto sub_enc = enc->ReclaimContainedEncoders();
-        if (sub_enc.empty()) {
-          break;
-        }
-        RTC_CHECK_EQ(1, sub_enc.size());
-
-        // Replace enc with its sub encoder. We need to put the sub encoder in
-        // a temporary first, since otherwise the old value of enc would be
-        // destroyed before the new value got assigned, which would be bad
-        // since the new value is a part of the old value.
-        auto tmp_enc = std::move(sub_enc[0]);
-        enc = std::move(tmp_enc);
-      }
-
-      // Wrap it in a new encoder stack and put it back.
-      codec_stack_params_.speech_encoder = std::move(enc);
-      *encoder = rac->RentEncoderStack(&codec_stack_params_);
-      if (!*encoder) {
-        error = true;
-      }
-    });
-    if (error) {
-      return false;
-    }
-    if (!recreate_encoder_) {
-      return true;
-    }
-  }
-
-  if (!send_codec_inst_) {
-    // We don't have the information we need to create a new speech encoder.
-    // (This is not an error.)
-    return true;
-  }
-
-  codec_stack_params_.speech_encoder = rac->RentEncoder(*send_codec_inst_);
-  auto stack = rac->RentEncoderStack(&codec_stack_params_);
-  if (!stack) {
-    return false;
-  }
-  acm->SetEncoder(std::move(stack));
-  recreate_encoder_ = false;
-  return true;
-}
-
-}  // namespace acm2
-}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/codec_manager.h b/modules/audio_coding/acm2/codec_manager.h
deleted file mode 100644
index 22dbf4e..0000000
--- a/modules/audio_coding/acm2/codec_manager.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_AUDIO_CODING_ACM2_CODEC_MANAGER_H_
-#define MODULES_AUDIO_CODING_ACM2_CODEC_MANAGER_H_
-
-#include "absl/types/optional.h"
-#include "common_types.h"  // NOLINT(build/include)
-#include "modules/audio_coding/acm2/rent_a_codec.h"
-#include "modules/audio_coding/include/audio_coding_module.h"
-#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
-#include "rtc_base/constructormagic.h"
-#include "rtc_base/thread_checker.h"
-
-namespace webrtc {
-
-class AudioEncoder;
-
-namespace acm2 {
-
-class CodecManager final {
- public:
-  CodecManager();
-  ~CodecManager();
-
-  // Parses the given specification. On success, returns true and updates the
-  // stored CodecInst and stack parameters; on error, returns false.
-  bool RegisterEncoder(const CodecInst& send_codec);
-
-  static CodecInst ForgeCodecInst(const AudioEncoder* external_speech_encoder);
-
-  const CodecInst* GetCodecInst() const {
-    return send_codec_inst_ ? &*send_codec_inst_ : nullptr;
-  }
-
-  void UnsetCodecInst() { send_codec_inst_ = absl::nullopt; }
-
-  const RentACodec::StackParameters* GetStackParams() const {
-    return &codec_stack_params_;
-  }
-  RentACodec::StackParameters* GetStackParams() { return &codec_stack_params_; }
-
-  bool SetCopyRed(bool enable);
-
-  bool SetVAD(bool enable, ACMVADMode mode);
-
-  bool SetCodecFEC(bool enable_codec_fec);
-
-  // Uses the provided Rent-A-Codec to create a new encoder stack, if we have a
-  // complete specification; if so, it is then passed to set_encoder. On error,
-  // returns false.
-  bool MakeEncoder(RentACodec* rac, AudioCodingModule* acm);
-
- private:
-  rtc::ThreadChecker thread_checker_;
-  absl::optional<CodecInst> send_codec_inst_;
-  RentACodec::StackParameters codec_stack_params_;
-  bool recreate_encoder_ = true;  // Need to recreate encoder?
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(CodecManager);
-};
-
-}  // namespace acm2
-}  // namespace webrtc
-#endif  // MODULES_AUDIO_CODING_ACM2_CODEC_MANAGER_H_
diff --git a/modules/audio_coding/acm2/codec_manager_unittest.cc b/modules/audio_coding/acm2/codec_manager_unittest.cc
deleted file mode 100644
index 6a5ea5f..0000000
--- a/modules/audio_coding/acm2/codec_manager_unittest.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <memory>
-
-#include "modules/audio_coding/acm2/codec_manager.h"
-#include "modules/audio_coding/acm2/rent_a_codec.h"
-#include "test/gtest.h"
-#include "test/mock_audio_encoder.h"
-
-namespace webrtc {
-namespace acm2 {
-
-using ::testing::Return;
-
-namespace {
-
-// Create a MockAudioEncoder with some reasonable default behavior.
-std::unique_ptr<MockAudioEncoder> CreateMockEncoder() {
-  auto enc = std::unique_ptr<MockAudioEncoder>(new MockAudioEncoder);
-  EXPECT_CALL(*enc, SampleRateHz()).WillRepeatedly(Return(8000));
-  EXPECT_CALL(*enc, NumChannels()).WillRepeatedly(Return(1));
-  EXPECT_CALL(*enc, Max10MsFramesInAPacket()).WillRepeatedly(Return(1));
-  return enc;
-}
-
-}  // namespace
-
-TEST(CodecManagerTest, ExternalEncoderFec) {
-  auto enc0 = CreateMockEncoder();
-  auto enc1 = CreateMockEncoder();
-  auto enc2 = CreateMockEncoder();
-  {
-    ::testing::InSequence s;
-    EXPECT_CALL(*enc0, SetFec(false)).WillOnce(Return(true));
-    EXPECT_CALL(*enc1, SetFec(true)).WillOnce(Return(true));
-    EXPECT_CALL(*enc2, SetFec(true)).WillOnce(Return(false));
-  }
-
-  CodecManager cm;
-  RentACodec rac;
-
-  // use_codec_fec starts out false.
-  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
-  cm.GetStackParams()->speech_encoder = std::move(enc0);
-  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
-  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
-
-  // Set it to true.
-  EXPECT_EQ(true, cm.SetCodecFEC(true));
-  EXPECT_TRUE(cm.GetStackParams()->use_codec_fec);
-  cm.GetStackParams()->speech_encoder = std::move(enc1);
-  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
-  EXPECT_TRUE(cm.GetStackParams()->use_codec_fec);
-
-  // Switch to a codec that doesn't support it.
-  cm.GetStackParams()->speech_encoder = std::move(enc2);
-  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
-  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
-}
-
-}  // namespace acm2
-}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/rent_a_codec.cc b/modules/audio_coding/acm2/rent_a_codec.cc
index 7601519..bfddc42b 100644
--- a/modules/audio_coding/acm2/rent_a_codec.cc
+++ b/modules/audio_coding/acm2/rent_a_codec.cc
@@ -13,35 +13,9 @@
 #include <memory>
 #include <utility>
 
-#include "absl/strings/match.h"
-#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
-#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
-#include "modules/audio_coding/codecs/g722/audio_encoder_g722.h"
 #include "rtc_base/logging.h"
-#ifdef WEBRTC_CODEC_ILBC
-#include "modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"  // nogncheck
-#endif
-#ifdef WEBRTC_CODEC_ISACFX
-#include "modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"  // nogncheck
-#include "modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"  // nogncheck
-#endif
-#ifdef WEBRTC_CODEC_ISAC
-#include "modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"  // nogncheck
-#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"  // nogncheck
-#endif
-#ifdef WEBRTC_CODEC_OPUS
-#include "modules/audio_coding/codecs/opus/audio_encoder_opus.h"
-#endif
-#include "modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
-#ifdef WEBRTC_CODEC_RED
-#include "modules/audio_coding/codecs/red/audio_encoder_copy_red.h"  // nogncheck
-#endif
 #include "modules/audio_coding/acm2/acm_codec_database.h"
 
-#if defined(WEBRTC_CODEC_ISACFX) || defined(WEBRTC_CODEC_ISAC)
-#include "modules/audio_coding/codecs/isac/locked_bandwidth_info.h"
-#endif
-
 namespace webrtc {
 namespace acm2 {
 
@@ -55,7 +29,8 @@
 
 absl::optional<CodecInst> RentACodec::CodecInstById(CodecId codec_id) {
   absl::optional<int> mi = CodecIndexFromId(codec_id);
-  return mi ? absl::optional<CodecInst>(Database()[*mi]) : absl::nullopt;
+  return mi ? absl::optional<CodecInst>(ACMCodecDB::database_[*mi])
+            : absl::nullopt;
 }
 
 absl::optional<RentACodec::CodecId> RentACodec::CodecIdByInst(
@@ -81,20 +56,6 @@
   return ci;
 }
 
-absl::optional<bool> RentACodec::IsSupportedNumChannels(CodecId codec_id,
-                                                        size_t num_channels) {
-  auto i = CodecIndexFromId(codec_id);
-  return i ? absl::optional<bool>(
-                 ACMCodecDB::codec_settings_[*i].channel_support >=
-                 num_channels)
-           : absl::nullopt;
-}
-
-rtc::ArrayView<const CodecInst> RentACodec::Database() {
-  return rtc::ArrayView<const CodecInst>(ACMCodecDB::database_,
-                                         NumberOfCodecs());
-}
-
 absl::optional<NetEqDecoder> RentACodec::NetEqDecoderFromCodecId(
     CodecId codec_id,
     size_t num_channels) {
@@ -107,200 +68,5 @@
              : ned;
 }
 
-RentACodec::RegistrationResult RentACodec::RegisterCngPayloadType(
-    std::map<int, int>* pt_map,
-    const CodecInst& codec_inst) {
-  if (!absl::EqualsIgnoreCase(codec_inst.plname, "CN"))
-    return RegistrationResult::kSkip;
-  switch (codec_inst.plfreq) {
-    case 8000:
-    case 16000:
-    case 32000:
-    case 48000:
-      (*pt_map)[codec_inst.plfreq] = codec_inst.pltype;
-      return RegistrationResult::kOk;
-    default:
-      return RegistrationResult::kBadFreq;
-  }
-}
-
-RentACodec::RegistrationResult RentACodec::RegisterRedPayloadType(
-    std::map<int, int>* pt_map,
-    const CodecInst& codec_inst) {
-  if (!absl::EqualsIgnoreCase(codec_inst.plname, "RED"))
-    return RegistrationResult::kSkip;
-  switch (codec_inst.plfreq) {
-    case 8000:
-      (*pt_map)[codec_inst.plfreq] = codec_inst.pltype;
-      return RegistrationResult::kOk;
-    default:
-      return RegistrationResult::kBadFreq;
-  }
-}
-
-namespace {
-
-// Returns a new speech encoder, or null on error.
-// TODO(kwiberg): Don't handle errors here (bug 5033)
-std::unique_ptr<AudioEncoder> CreateEncoder(
-    const CodecInst& speech_inst,
-    const rtc::scoped_refptr<LockedIsacBandwidthInfo>& bwinfo) {
-#if defined(WEBRTC_CODEC_ISACFX)
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "isac"))
-    return std::unique_ptr<AudioEncoder>(
-        new AudioEncoderIsacFixImpl(speech_inst, bwinfo));
-#endif
-#if defined(WEBRTC_CODEC_ISAC)
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "isac"))
-    return std::unique_ptr<AudioEncoder>(
-        new AudioEncoderIsacFloatImpl(speech_inst, bwinfo));
-#endif
-#ifdef WEBRTC_CODEC_OPUS
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "opus"))
-    return std::unique_ptr<AudioEncoder>(new AudioEncoderOpusImpl(speech_inst));
-#endif
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "pcmu"))
-    return std::unique_ptr<AudioEncoder>(new AudioEncoderPcmU(speech_inst));
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "pcma"))
-    return std::unique_ptr<AudioEncoder>(new AudioEncoderPcmA(speech_inst));
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "l16"))
-    return std::unique_ptr<AudioEncoder>(new AudioEncoderPcm16B(speech_inst));
-#ifdef WEBRTC_CODEC_ILBC
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "ilbc"))
-    return std::unique_ptr<AudioEncoder>(new AudioEncoderIlbcImpl(speech_inst));
-#endif
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "g722"))
-    return std::unique_ptr<AudioEncoder>(new AudioEncoderG722Impl(speech_inst));
-  RTC_LOG_F(LS_ERROR) << "Could not create encoder of type "
-                      << speech_inst.plname;
-  return std::unique_ptr<AudioEncoder>();
-}
-
-std::unique_ptr<AudioEncoder> CreateRedEncoder(
-    std::unique_ptr<AudioEncoder> encoder,
-    int red_payload_type) {
-#ifdef WEBRTC_CODEC_RED
-  AudioEncoderCopyRed::Config config;
-  config.payload_type = red_payload_type;
-  config.speech_encoder = std::move(encoder);
-  return std::unique_ptr<AudioEncoder>(
-      new AudioEncoderCopyRed(std::move(config)));
-#else
-  return std::unique_ptr<AudioEncoder>();
-#endif
-}
-
-std::unique_ptr<AudioEncoder> CreateCngEncoder(
-    std::unique_ptr<AudioEncoder> encoder,
-    int payload_type,
-    ACMVADMode vad_mode) {
-  AudioEncoderCngConfig config;
-  config.num_channels = encoder->NumChannels();
-  config.payload_type = payload_type;
-  config.speech_encoder = std::move(encoder);
-  switch (vad_mode) {
-    case VADNormal:
-      config.vad_mode = Vad::kVadNormal;
-      break;
-    case VADLowBitrate:
-      config.vad_mode = Vad::kVadLowBitrate;
-      break;
-    case VADAggr:
-      config.vad_mode = Vad::kVadAggressive;
-      break;
-    case VADVeryAggr:
-      config.vad_mode = Vad::kVadVeryAggressive;
-      break;
-    default:
-      FATAL();
-  }
-  return CreateComfortNoiseEncoder(std::move(config));
-}
-
-std::unique_ptr<AudioDecoder> CreateIsacDecoder(
-    int sample_rate_hz,
-    const rtc::scoped_refptr<LockedIsacBandwidthInfo>& bwinfo) {
-#if defined(WEBRTC_CODEC_ISACFX)
-  return std::unique_ptr<AudioDecoder>(
-      new AudioDecoderIsacFixImpl(sample_rate_hz, bwinfo));
-#elif defined(WEBRTC_CODEC_ISAC)
-  return std::unique_ptr<AudioDecoder>(
-      new AudioDecoderIsacFloatImpl(sample_rate_hz, bwinfo));
-#else
-  FATAL() << "iSAC is not supported.";
-  return std::unique_ptr<AudioDecoder>();
-#endif
-}
-
-}  // namespace
-
-RentACodec::RentACodec() {
-#if defined(WEBRTC_CODEC_ISACFX) || defined(WEBRTC_CODEC_ISAC)
-  isac_bandwidth_info_ = new LockedIsacBandwidthInfo;
-#endif
-}
-RentACodec::~RentACodec() = default;
-
-std::unique_ptr<AudioEncoder> RentACodec::RentEncoder(
-    const CodecInst& codec_inst) {
-  return CreateEncoder(codec_inst, isac_bandwidth_info_);
-}
-
-RentACodec::StackParameters::StackParameters() {
-  // Register the default payload types for RED and CNG.
-  for (const CodecInst& ci : RentACodec::Database()) {
-    RentACodec::RegisterCngPayloadType(&cng_payload_types, ci);
-    RentACodec::RegisterRedPayloadType(&red_payload_types, ci);
-  }
-}
-
-RentACodec::StackParameters::~StackParameters() = default;
-
-std::unique_ptr<AudioEncoder> RentACodec::RentEncoderStack(
-    StackParameters* param) {
-  if (!param->speech_encoder)
-    return nullptr;
-
-  if (param->use_codec_fec) {
-    // Switch FEC on. On failure, remember that FEC is off.
-    if (!param->speech_encoder->SetFec(true))
-      param->use_codec_fec = false;
-  } else {
-    // Switch FEC off. This shouldn't fail.
-    const bool success = param->speech_encoder->SetFec(false);
-    RTC_DCHECK(success);
-  }
-
-  auto pt = [&param](const std::map<int, int>& m) {
-    auto it = m.find(param->speech_encoder->SampleRateHz());
-    return it == m.end() ? absl::nullopt : absl::optional<int>(it->second);
-  };
-  auto cng_pt = pt(param->cng_payload_types);
-  param->use_cng =
-      param->use_cng && cng_pt && param->speech_encoder->NumChannels() == 1;
-  auto red_pt = pt(param->red_payload_types);
-  param->use_red = param->use_red && red_pt;
-
-  if (param->use_cng || param->use_red) {
-    // The RED and CNG encoders need to be in sync with the speech encoder, so
-    // reset the latter to ensure its buffer is empty.
-    param->speech_encoder->Reset();
-  }
-  std::unique_ptr<AudioEncoder> encoder_stack =
-      std::move(param->speech_encoder);
-  if (param->use_red) {
-    encoder_stack = CreateRedEncoder(std::move(encoder_stack), *red_pt);
-  }
-  if (param->use_cng) {
-    encoder_stack =
-        CreateCngEncoder(std::move(encoder_stack), *cng_pt, param->vad_mode);
-  }
-  return encoder_stack;
-}
-
-std::unique_ptr<AudioDecoder> RentACodec::RentIsacDecoder(int sample_rate_hz) {
-  return CreateIsacDecoder(sample_rate_hz, isac_bandwidth_info_);
-}
-
 }  // namespace acm2
 }  // namespace webrtc
diff --git a/modules/audio_coding/acm2/rent_a_codec.h b/modules/audio_coding/acm2/rent_a_codec.h
index b0ad382..2cf1c6e 100644
--- a/modules/audio_coding/acm2/rent_a_codec.h
+++ b/modules/audio_coding/acm2/rent_a_codec.h
@@ -31,8 +31,7 @@
 
 namespace acm2 {
 
-class RentACodec {
- public:
+struct RentACodec {
   enum class CodecId {
 #if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
     kISAC,
@@ -133,64 +132,9 @@
     return payload_type >= 0 && payload_type <= 127;
   }
 
-  static rtc::ArrayView<const CodecInst> Database();
-
-  static absl::optional<bool> IsSupportedNumChannels(CodecId codec_id,
-                                                     size_t num_channels);
-
   static absl::optional<NetEqDecoder> NetEqDecoderFromCodecId(
       CodecId codec_id,
       size_t num_channels);
-
-  // Parse codec_inst and extract payload types. If the given CodecInst was for
-  // the wrong sort of codec, return kSkip; otherwise, if the rate was illegal,
-  // return kBadFreq; otherwise, update the given RTP timestamp rate (Hz) ->
-  // payload type map and return kOk.
-  enum class RegistrationResult { kOk, kSkip, kBadFreq };
-  static RegistrationResult RegisterCngPayloadType(std::map<int, int>* pt_map,
-                                                   const CodecInst& codec_inst);
-  static RegistrationResult RegisterRedPayloadType(std::map<int, int>* pt_map,
-                                                   const CodecInst& codec_inst);
-
-  RentACodec();
-  ~RentACodec();
-
-  // Creates and returns an audio encoder built to the given specification.
-  // Returns null in case of error.
-  std::unique_ptr<AudioEncoder> RentEncoder(const CodecInst& codec_inst);
-
-  struct StackParameters {
-    StackParameters();
-    ~StackParameters();
-
-    std::unique_ptr<AudioEncoder> speech_encoder;
-
-    bool use_codec_fec = false;
-    bool use_red = false;
-    bool use_cng = false;
-    ACMVADMode vad_mode = VADNormal;
-
-    // Maps from RTP timestamp rate (in Hz) to payload type.
-    std::map<int, int> cng_payload_types;
-    std::map<int, int> red_payload_types;
-  };
-
-  // Creates and returns an audio encoder stack constructed to the given
-  // specification. If the specification isn't compatible with the encoder, it
-  // will be changed to match (things will be switched off). The speech encoder
-  // will be stolen. If the specification isn't complete, returns nullptr.
-  std::unique_ptr<AudioEncoder> RentEncoderStack(StackParameters* param);
-
-  // Creates and returns an iSAC decoder.
-  std::unique_ptr<AudioDecoder> RentIsacDecoder(int sample_rate_hz);
-
- private:
-  std::unique_ptr<AudioEncoder> speech_encoder_;
-  std::unique_ptr<AudioEncoder> cng_encoder_;
-  std::unique_ptr<AudioEncoder> red_encoder_;
-  rtc::scoped_refptr<LockedIsacBandwidthInfo> isac_bandwidth_info_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(RentACodec);
 };
 
 }  // namespace acm2
diff --git a/modules/audio_coding/acm2/rent_a_codec_unittest.cc b/modules/audio_coding/acm2/rent_a_codec_unittest.cc
deleted file mode 100644
index fd3329c..0000000
--- a/modules/audio_coding/acm2/rent_a_codec_unittest.cc
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <memory>
-
-#include "common_types.h"
-#include "modules/audio_coding/acm2/rent_a_codec.h"
-#include "rtc_base/arraysize.h"
-#include "test/gtest.h"
-#include "test/mock_audio_encoder.h"
-
-namespace webrtc {
-namespace acm2 {
-
-using ::testing::Return;
-
-namespace {
-
-const int kDataLengthSamples = 80;
-const int kPacketSizeSamples = 2 * kDataLengthSamples;
-const int16_t kZeroData[kDataLengthSamples] = {0};
-const CodecInst kDefaultCodecInst = {0, "pcmu", 8000, kPacketSizeSamples,
-                                     1, 64000};
-const int kCngPt = 13;
-
-class Marker final {
- public:
-  MOCK_METHOD1(Mark, void(std::string desc));
-};
-
-}  // namespace
-
-class RentACodecTestF : public ::testing::Test {
- protected:
-  void CreateCodec() {
-    auto speech_encoder = rent_a_codec_.RentEncoder(kDefaultCodecInst);
-    ASSERT_TRUE(speech_encoder);
-    RentACodec::StackParameters param;
-    param.use_cng = true;
-    param.speech_encoder = std::move(speech_encoder);
-    encoder_ = rent_a_codec_.RentEncoderStack(&param);
-  }
-
-  void EncodeAndVerify(size_t expected_out_length,
-                       uint32_t expected_timestamp,
-                       int expected_payload_type,
-                       int expected_send_even_if_empty) {
-    rtc::Buffer out;
-    AudioEncoder::EncodedInfo encoded_info;
-    encoded_info = encoder_->Encode(timestamp_, kZeroData, &out);
-    timestamp_ += kDataLengthSamples;
-    EXPECT_TRUE(encoded_info.redundant.empty());
-    EXPECT_EQ(expected_out_length, encoded_info.encoded_bytes);
-    EXPECT_EQ(expected_timestamp, encoded_info.encoded_timestamp);
-    if (expected_payload_type >= 0)
-      EXPECT_EQ(expected_payload_type, encoded_info.payload_type);
-    if (expected_send_even_if_empty >= 0)
-      EXPECT_EQ(static_cast<bool>(expected_send_even_if_empty),
-                encoded_info.send_even_if_empty);
-  }
-
-  RentACodec rent_a_codec_;
-  std::unique_ptr<AudioEncoder> encoder_;
-  uint32_t timestamp_ = 0;
-};
-
-// This test verifies that CNG frames are delivered as expected. Since the frame
-// size is set to 20 ms, we expect the first encode call to produce no output
-// (which is signaled as 0 bytes output of type kNoEncoding). The next encode
-// call should produce one SID frame of 9 bytes. The third call should not
-// result in any output (just like the first one). The fourth and final encode
-// call should produce an "empty frame", which is like no output, but with
-// AudioEncoder::EncodedInfo::send_even_if_empty set to true. (The reason to
-// produce an empty frame is to drive sending of DTMF packets in the RTP/RTCP
-// module.)
-TEST_F(RentACodecTestF, VerifyCngFrames) {
-  CreateCodec();
-  uint32_t expected_timestamp = timestamp_;
-  // Verify no frame.
-  {
-    SCOPED_TRACE("First encoding");
-    EncodeAndVerify(0, expected_timestamp, -1, -1);
-  }
-
-  // Verify SID frame delivered.
-  {
-    SCOPED_TRACE("Second encoding");
-    EncodeAndVerify(9, expected_timestamp, kCngPt, 1);
-  }
-
-  // Verify no frame.
-  {
-    SCOPED_TRACE("Third encoding");
-    EncodeAndVerify(0, expected_timestamp, -1, -1);
-  }
-
-  // Verify NoEncoding.
-  expected_timestamp += 2 * kDataLengthSamples;
-  {
-    SCOPED_TRACE("Fourth encoding");
-    EncodeAndVerify(0, expected_timestamp, kCngPt, 1);
-  }
-}
-
-TEST(RentACodecTest, ExternalEncoder) {
-  const int kSampleRateHz = 8000;
-  auto* external_encoder = new MockAudioEncoder;
-  EXPECT_CALL(*external_encoder, SampleRateHz())
-      .WillRepeatedly(Return(kSampleRateHz));
-  EXPECT_CALL(*external_encoder, NumChannels()).WillRepeatedly(Return(1));
-  EXPECT_CALL(*external_encoder, SetFec(false)).WillRepeatedly(Return(true));
-
-  RentACodec rac;
-  RentACodec::StackParameters param;
-  param.speech_encoder = std::unique_ptr<AudioEncoder>(external_encoder);
-  std::unique_ptr<AudioEncoder> encoder_stack = rac.RentEncoderStack(&param);
-  EXPECT_EQ(external_encoder, encoder_stack.get());
-  const int kPacketSizeSamples = kSampleRateHz / 100;
-  int16_t audio[kPacketSizeSamples] = {0};
-  rtc::Buffer encoded;
-  AudioEncoder::EncodedInfo info;
-
-  Marker marker;
-  {
-    ::testing::InSequence s;
-    info.encoded_timestamp = 0;
-    EXPECT_CALL(*external_encoder,
-                EncodeImpl(0, rtc::ArrayView<const int16_t>(audio), &encoded))
-        .WillOnce(Return(info));
-    EXPECT_CALL(marker, Mark("A"));
-    EXPECT_CALL(marker, Mark("B"));
-    EXPECT_CALL(marker, Mark("C"));
-  }
-
-  info = encoder_stack->Encode(0, audio, &encoded);
-  EXPECT_EQ(0u, info.encoded_timestamp);
-  marker.Mark("A");
-
-  // Change to internal encoder.
-  CodecInst codec_inst = kDefaultCodecInst;
-  codec_inst.pacsize = kPacketSizeSamples;
-  param.speech_encoder = rac.RentEncoder(codec_inst);
-  ASSERT_TRUE(param.speech_encoder);
-  AudioEncoder* enc = param.speech_encoder.get();
-  std::unique_ptr<AudioEncoder> stack = rac.RentEncoderStack(&param);
-  EXPECT_EQ(enc, stack.get());
-
-  // Don't expect any more calls to the external encoder.
-  info = stack->Encode(1, audio, &encoded);
-  marker.Mark("B");
-  encoder_stack.reset();
-  marker.Mark("C");
-}
-
-// Verify that the speech encoder's Reset method is called when CNG or RED
-// (or both) are switched on, but not when they're switched off.
-void TestCngAndRedResetSpeechEncoder(bool use_cng, bool use_red) {
-  auto make_enc = [] {
-    auto speech_encoder =
-        std::unique_ptr<MockAudioEncoder>(new MockAudioEncoder);
-    EXPECT_CALL(*speech_encoder, NumChannels()).WillRepeatedly(Return(1));
-    EXPECT_CALL(*speech_encoder, Max10MsFramesInAPacket())
-        .WillRepeatedly(Return(2));
-    EXPECT_CALL(*speech_encoder, SampleRateHz()).WillRepeatedly(Return(8000));
-    EXPECT_CALL(*speech_encoder, SetFec(false)).WillRepeatedly(Return(true));
-    return speech_encoder;
-  };
-  auto speech_encoder1 = make_enc();
-  auto speech_encoder2 = make_enc();
-  Marker marker;
-  {
-    ::testing::InSequence s;
-    EXPECT_CALL(marker, Mark("disabled"));
-    EXPECT_CALL(marker, Mark("enabled"));
-    if (use_cng || use_red)
-      EXPECT_CALL(*speech_encoder2, Reset());
-  }
-
-  RentACodec::StackParameters param1, param2;
-  param1.speech_encoder = std::move(speech_encoder1);
-  param2.speech_encoder = std::move(speech_encoder2);
-  param2.use_cng = use_cng;
-  param2.use_red = use_red;
-  marker.Mark("disabled");
-  RentACodec rac;
-  rac.RentEncoderStack(&param1);
-  marker.Mark("enabled");
-  rac.RentEncoderStack(&param2);
-}
-
-TEST(RentACodecTest, CngResetsSpeechEncoder) {
-  TestCngAndRedResetSpeechEncoder(true, false);
-}
-
-TEST(RentACodecTest, RedResetsSpeechEncoder) {
-  TestCngAndRedResetSpeechEncoder(false, true);
-}
-
-TEST(RentACodecTest, CngAndRedResetsSpeechEncoder) {
-  TestCngAndRedResetSpeechEncoder(true, true);
-}
-
-TEST(RentACodecTest, NoCngAndRedNoSpeechEncoderReset) {
-  TestCngAndRedResetSpeechEncoder(false, false);
-}
-
-TEST(RentACodecTest, RentEncoderError) {
-  const CodecInst codec_inst = {
-      0, "Robert'); DROP TABLE Students;", 8000, 160, 1, 64000};
-  RentACodec rent_a_codec;
-  EXPECT_FALSE(rent_a_codec.RentEncoder(codec_inst));
-}
-
-TEST(RentACodecTest, RentEncoderStackWithoutSpeechEncoder) {
-  RentACodec::StackParameters sp;
-  EXPECT_EQ(nullptr, sp.speech_encoder);
-  EXPECT_EQ(nullptr, RentACodec().RentEncoderStack(&sp));
-}
-
-}  // namespace acm2
-}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc
index 85084c8..9e47a06 100644
--- a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc
+++ b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc
@@ -46,17 +46,7 @@
                                    kEventLogMinBitrateChangeBps,
                                    kEventLogMinBitrateChangeFraction,
                                    kEventLogMinPacketLossChangeFraction)
-              : nullptr),
-      enable_bitrate_adaptation_(
-          webrtc::field_trial::IsEnabled("WebRTC-Audio-BitrateAdaptation")),
-      enable_dtx_adaptation_(
-          webrtc::field_trial::IsEnabled("WebRTC-Audio-DtxAdaptation")),
-      enable_fec_adaptation_(
-          webrtc::field_trial::IsEnabled("WebRTC-Audio-FecAdaptation")),
-      enable_channel_adaptation_(
-          webrtc::field_trial::IsEnabled("WebRTC-Audio-ChannelAdaptation")),
-      enable_frame_length_adaptation_(webrtc::field_trial::IsEnabled(
-          "WebRTC-Audio-FrameLengthAdaptation")) {
+              : nullptr) {
   RTC_DCHECK(controller_manager_);
 }
 
@@ -157,24 +147,6 @@
   }
   prev_config_ = config;
 
-  // Prevent certain controllers from taking action (determined by field trials)
-  if (!enable_bitrate_adaptation_ && config.bitrate_bps) {
-    config.bitrate_bps.reset();
-  }
-  if (!enable_dtx_adaptation_ && config.enable_dtx) {
-    config.enable_dtx.reset();
-  }
-  if (!enable_fec_adaptation_ && config.enable_fec) {
-    config.enable_fec.reset();
-    config.uplink_packet_loss_fraction.reset();
-  }
-  if (!enable_frame_length_adaptation_ && config.frame_length_ms) {
-    config.frame_length_ms.reset();
-  }
-  if (!enable_channel_adaptation_ && config.num_channels) {
-    config.num_channels.reset();
-  }
-
   if (debug_dump_writer_)
     debug_dump_writer_->DumpEncoderRuntimeConfig(config, rtc::TimeMillis());
 
diff --git a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h
index d3ecce0..4c1c19b 100644
--- a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h
+++ b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h
@@ -83,12 +83,6 @@
 
   ANAStats stats_;
 
-  const bool enable_bitrate_adaptation_;
-  const bool enable_dtx_adaptation_;
-  const bool enable_fec_adaptation_;
-  const bool enable_channel_adaptation_;
-  const bool enable_frame_length_adaptation_;
-
   RTC_DISALLOW_COPY_AND_ASSIGN(AudioNetworkAdaptorImpl);
 };
 
diff --git a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl_unittest.cc b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl_unittest.cc
index 5948ac3..be9550a 100644
--- a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl_unittest.cc
+++ b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl_unittest.cc
@@ -51,7 +51,7 @@
     return false;
   }
   auto ana_event = static_cast<RtcEventAudioNetworkAdaptation*>(arg);
-  return *ana_event->config_ == config;
+  return ana_event->config() == config;
 }
 
 MATCHER_P(EncoderRuntimeConfigIs, config, "") {
diff --git a/modules/audio_coding/audio_network_adaptor/event_log_writer_unittest.cc b/modules/audio_coding/audio_network_adaptor/event_log_writer_unittest.cc
index df97594..42189c3 100644
--- a/modules/audio_coding/audio_network_adaptor/event_log_writer_unittest.cc
+++ b/modules/audio_coding/audio_network_adaptor/event_log_writer_unittest.cc
@@ -36,7 +36,7 @@
     return false;
   }
   auto ana_event = static_cast<RtcEventAudioNetworkAdaptation*>(arg);
-  return *ana_event->config_ == config;
+  return ana_event->config() == config;
 }
 
 struct EventLogWriterStates {
diff --git a/modules/audio_coding/codecs/isac/main/source/entropy_coding.c b/modules/audio_coding/codecs/isac/main/source/entropy_coding.c
index 28767af..6692a51 100644
--- a/modules/audio_coding/codecs/isac/main/source/entropy_coding.c
+++ b/modules/audio_coding/codecs/isac/main/source/entropy_coding.c
@@ -96,7 +96,7 @@
                           const int32_t gainQ10,
                           int32_t* CurveQ16) {
   int32_t CorrQ11[AR_ORDER + 1];
-  int32_t sum, tmpGain;
+  int64_t sum, tmpGain;
   int32_t diffQ16[FRAMESAMPLES / 8];
   const int16_t* CS_ptrQ9;
   int k, n;
diff --git a/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
index adc6656..1a88acf 100644
--- a/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
+++ b/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -471,6 +471,8 @@
     : payload_type_(payload_type),
       send_side_bwe_with_overhead_(
           webrtc::field_trial::IsEnabled("WebRTC-SendSideBwe-WithOverhead")),
+      use_link_capacity_for_adaptation_(webrtc::field_trial::IsEnabled(
+          "WebRTC-Audio-LinkCapacityAdaptation")),
       adjust_bandwidth_(
           webrtc::field_trial::IsEnabled("WebRTC-AdjustOpusBandwidth")),
       bitrate_changed_(true),
@@ -605,7 +607,8 @@
 
 void AudioEncoderOpusImpl::OnReceivedUplinkBandwidth(
     int target_audio_bitrate_bps,
-    absl::optional<int64_t> bwe_period_ms) {
+    absl::optional<int64_t> bwe_period_ms,
+    absl::optional<int64_t> link_capacity_allocation_bps) {
   if (audio_network_adaptor_) {
     audio_network_adaptor_->SetTargetAudioBitrate(target_audio_bitrate_bps);
     // We give smoothed bitrate allocation to audio network adaptor as
@@ -623,6 +626,9 @@
       bitrate_smoother_->SetTimeConstantMs(*bwe_period_ms * 4);
     bitrate_smoother_->AddSample(target_audio_bitrate_bps);
 
+    if (link_capacity_allocation_bps)
+      link_capacity_allocation_bps_ = link_capacity_allocation_bps;
+
     ApplyAudioNetworkAdaptor();
   } else if (send_side_bwe_with_overhead_) {
     if (!overhead_bytes_per_packet_) {
@@ -641,6 +647,18 @@
     SetTargetBitrate(target_audio_bitrate_bps);
   }
 }
+void AudioEncoderOpusImpl::OnReceivedUplinkBandwidth(
+    int target_audio_bitrate_bps,
+    absl::optional<int64_t> bwe_period_ms) {
+  OnReceivedUplinkBandwidth(target_audio_bitrate_bps, bwe_period_ms,
+                            absl::nullopt);
+}
+
+void AudioEncoderOpusImpl::OnReceivedUplinkAllocation(
+    BitrateAllocationUpdate update) {
+  OnReceivedUplinkBandwidth(update.target_bitrate.bps(), update.bwe_period.ms(),
+                            update.link_capacity.bps());
+}
 
 void AudioEncoderOpusImpl::OnReceivedRtt(int rtt_ms) {
   if (!audio_network_adaptor_)
@@ -875,14 +893,20 @@
 
 void AudioEncoderOpusImpl::MaybeUpdateUplinkBandwidth() {
   if (audio_network_adaptor_) {
-    int64_t now_ms = rtc::TimeMillis();
-    if (!bitrate_smoother_last_update_time_ ||
-        now_ms - *bitrate_smoother_last_update_time_ >=
-            config_.uplink_bandwidth_update_interval_ms) {
-      absl::optional<float> smoothed_bitrate = bitrate_smoother_->GetAverage();
-      if (smoothed_bitrate)
-        audio_network_adaptor_->SetUplinkBandwidth(*smoothed_bitrate);
-      bitrate_smoother_last_update_time_ = now_ms;
+    if (use_link_capacity_for_adaptation_ && link_capacity_allocation_bps_) {
+      audio_network_adaptor_->SetUplinkBandwidth(
+          *link_capacity_allocation_bps_);
+    } else {
+      int64_t now_ms = rtc::TimeMillis();
+      if (!bitrate_smoother_last_update_time_ ||
+          now_ms - *bitrate_smoother_last_update_time_ >=
+              config_.uplink_bandwidth_update_interval_ms) {
+        absl::optional<float> smoothed_bitrate =
+            bitrate_smoother_->GetAverage();
+        if (smoothed_bitrate)
+          audio_network_adaptor_->SetUplinkBandwidth(*smoothed_bitrate);
+        bitrate_smoother_last_update_time_ = now_ms;
+      }
     }
   }
 }
diff --git a/modules/audio_coding/codecs/opus/audio_encoder_opus.h b/modules/audio_coding/codecs/opus/audio_encoder_opus.h
index c26c6da..150423f 100644
--- a/modules/audio_coding/codecs/opus/audio_encoder_opus.h
+++ b/modules/audio_coding/codecs/opus/audio_encoder_opus.h
@@ -119,6 +119,7 @@
   void OnReceivedUplinkBandwidth(
       int target_audio_bitrate_bps,
       absl::optional<int64_t> bwe_period_ms) override;
+  void OnReceivedUplinkAllocation(BitrateAllocationUpdate update) override;
   void OnReceivedRtt(int rtt_ms) override;
   void OnReceivedOverhead(size_t overhead_bytes_per_packet) override;
   void SetReceiverFrameLengthRange(int min_frame_length_ms,
@@ -164,6 +165,11 @@
   void SetNumChannelsToEncode(size_t num_channels_to_encode);
   void SetProjectedPacketLossRate(float fraction);
 
+  void OnReceivedUplinkBandwidth(
+      int target_audio_bitrate_bps,
+      absl::optional<int64_t> bwe_period_ms,
+      absl::optional<int64_t> link_capacity_allocation);
+
   // TODO(minyue): remove "override" when we can deprecate
   // |AudioEncoder::SetTargetBitrate|.
   void SetTargetBitrate(int target_bps) override;
@@ -178,6 +184,7 @@
   AudioEncoderOpusConfig config_;
   const int payload_type_;
   const bool send_side_bwe_with_overhead_;
+  const bool use_link_capacity_for_adaptation_;
   const bool adjust_bandwidth_;
   bool bitrate_changed_;
   float packet_loss_rate_;
@@ -195,6 +202,7 @@
   absl::optional<size_t> overhead_bytes_per_packet_;
   const std::unique_ptr<SmoothingFilter> bitrate_smoother_;
   absl::optional<int64_t> bitrate_smoother_last_update_time_;
+  absl::optional<int64_t> link_capacity_allocation_bps_;
   int consecutive_dtx_frames_;
 
   friend struct AudioEncoderOpus;
diff --git a/modules/audio_coding/include/audio_coding_module.h b/modules/audio_coding/include/audio_coding_module.h
index b9f2228..f9fdba5 100644
--- a/modules/audio_coding/include/audio_coding_module.h
+++ b/modules/audio_coding/include/audio_coding_module.h
@@ -154,40 +154,6 @@
   //   Sender
   //
 
-  ///////////////////////////////////////////////////////////////////////////
-  // int32_t RegisterSendCodec()
-  // Registers a codec, specified by |send_codec|, as sending codec.
-  // This API can be called multiple of times to register Codec. The last codec
-  // registered overwrites the previous ones.
-  // The API can also be used to change payload type for CNG and RED, which are
-  // registered by default to default payload types.
-  // Note that registering CNG and RED won't overwrite speech codecs.
-  // This API can be called to set/change the send payload-type, frame-size
-  // or encoding rate (if applicable for the codec).
-  //
-  // Note: If a stereo codec is registered as send codec, VAD/DTX will
-  // automatically be turned off, since it is not supported for stereo sending.
-  //
-  // Note: If a secondary encoder is already registered, and the new send-codec
-  // has a sampling rate that does not match the secondary encoder, the
-  // secondary encoder will be unregistered.
-  //
-  // Input:
-  //   -send_codec         : Parameters of the codec to be registered, c.f.
-  //                         common_types.h for the definition of
-  //                         CodecInst.
-  //
-  // Return value:
-  //   -1 if failed to initialize,
-  //    0 if succeeded.
-  //
-  virtual int32_t RegisterSendCodec(const CodecInst& send_codec) = 0;
-
-  // Registers |external_speech_encoder| as encoder. The new encoder will
-  // replace any previously registered speech encoder (internal or external).
-  virtual void RegisterExternalSendCodec(
-      AudioEncoder* external_speech_encoder) = 0;
-
   // |modifier| is called exactly once with one argument: a pointer to the
   // unique_ptr that holds the current encoder (which is null if there is no
   // current encoder). For the duration of the call, |modifier| has exclusive
@@ -258,71 +224,6 @@
   virtual int32_t Add10MsData(const AudioFrame& audio_frame) = 0;
 
   ///////////////////////////////////////////////////////////////////////////
-  // (RED) Redundant Coding
-  //
-
-  ///////////////////////////////////////////////////////////////////////////
-  // int32_t SetREDStatus()
-  // configure RED status i.e. on/off.
-  //
-  // RFC 2198 describes a solution which has a single payload type which
-  // signifies a packet with redundancy. That packet then becomes a container,
-  // encapsulating multiple payloads into a single RTP packet.
-  // Such a scheme is flexible, since any amount of redundancy may be
-  // encapsulated within a single packet.  There is, however, a small overhead
-  // since each encapsulated payload must be preceded by a header indicating
-  // the type of data enclosed.
-  //
-  // Input:
-  //   -enable_red         : if true RED is enabled, otherwise RED is
-  //                         disabled.
-  //
-  // Return value:
-  //   -1 if failed to set RED status,
-  //    0 if succeeded.
-  //
-  virtual int32_t SetREDStatus(bool enable_red) = 0;
-
-  ///////////////////////////////////////////////////////////////////////////
-  // bool REDStatus()
-  // Get RED status
-  //
-  // Return value:
-  //   true if RED is enabled,
-  //   false if RED is disabled.
-  //
-  virtual bool REDStatus() const = 0;
-
-  ///////////////////////////////////////////////////////////////////////////
-  // (FEC) Forward Error Correction (codec internal)
-  //
-
-  ///////////////////////////////////////////////////////////////////////////
-  // int32_t SetCodecFEC()
-  // Configures codec internal FEC status i.e. on/off. No effects on codecs that
-  // do not provide internal FEC.
-  //
-  // Input:
-  //   -enable_fec         : if true FEC will be enabled otherwise the FEC is
-  //                         disabled.
-  //
-  // Return value:
-  //   -1 if failed, or the codec does not support FEC
-  //    0 if succeeded.
-  //
-  virtual int SetCodecFEC(bool enable_codec_fec) = 0;
-
-  ///////////////////////////////////////////////////////////////////////////
-  // bool CodecFEC()
-  // Gets status of codec internal FEC.
-  //
-  // Return value:
-  //   true if FEC is enabled,
-  //   false if FEC is disabled.
-  //
-  virtual bool CodecFEC() const = 0;
-
-  ///////////////////////////////////////////////////////////////////////////
   // int SetPacketLossRate()
   // Sets expected packet loss rate for encoding. Some encoders provide packet
   // loss gnostic encoding to make stream less sensitive to packet losses,
@@ -344,55 +245,6 @@
   //
 
   ///////////////////////////////////////////////////////////////////////////
-  // int32_t SetVAD()
-  // If DTX is enabled & the codec does not have internal DTX/VAD
-  // WebRtc VAD will be automatically enabled and |enable_vad| is ignored.
-  //
-  // If DTX is disabled but VAD is enabled no DTX packets are send,
-  // regardless of whether the codec has internal DTX/VAD or not. In this
-  // case, WebRtc VAD is running to label frames as active/in-active.
-  //
-  // NOTE! VAD/DTX is not supported when sending stereo.
-  //
-  // Inputs:
-  //   -enable_dtx         : if true DTX is enabled,
-  //                         otherwise DTX is disabled.
-  //   -enable_vad         : if true VAD is enabled,
-  //                         otherwise VAD is disabled.
-  //   -vad_mode           : determines the aggressiveness of VAD. A more
-  //                         aggressive mode results in more frames labeled
-  //                         as in-active, c.f. definition of
-  //                         ACMVADMode in audio_coding_module_typedefs.h
-  //                         for valid values.
-  //
-  // Return value:
-  //   -1 if failed to set up VAD/DTX,
-  //    0 if succeeded.
-  //
-  virtual int32_t SetVAD(const bool enable_dtx = true,
-                         const bool enable_vad = false,
-                         const ACMVADMode vad_mode = VADNormal) = 0;
-
-  ///////////////////////////////////////////////////////////////////////////
-  // int32_t VAD()
-  // Get VAD status.
-  //
-  // Outputs:
-  //   -dtx_enabled        : is set to true if DTX is enabled, otherwise
-  //                         is set to false.
-  //   -vad_enabled        : is set to true if VAD is enabled, otherwise
-  //                         is set to false.
-  //   -vad_mode            : is set to the current aggressiveness of VAD.
-  //
-  // Return value:
-  //   -1 if fails to retrieve the setting of DTX/VAD,
-  //    0 if succeeded.
-  //
-  virtual int32_t VAD(bool* dtx_enabled,
-                      bool* vad_enabled,
-                      ACMVADMode* vad_mode) const = 0;
-
-  ///////////////////////////////////////////////////////////////////////////
   // int32_t RegisterVADCallback()
   // Call this method to register a callback function which is called
   // any time that ACM encounters an empty frame. That is a frame which is
@@ -455,29 +307,6 @@
   virtual bool RegisterReceiveCodec(int rtp_payload_type,
                                     const SdpAudioFormat& audio_format) = 0;
 
-  ///////////////////////////////////////////////////////////////////////////
-  // int32_t RegisterReceiveCodec()
-  // Register possible decoders, can be called multiple times for
-  // codecs, CNG-NB, CNG-WB, CNG-SWB, AVT and RED.
-  //
-  // Input:
-  //   -receive_codec      : parameters of the codec to be registered, c.f.
-  //                         common_types.h for the definition of
-  //                         CodecInst.
-  //
-  // Return value:
-  //   -1 if failed to register the codec
-  //    0 if the codec registered successfully.
-  //
-  virtual int RegisterReceiveCodec(const CodecInst& receive_codec) = 0;
-
-  // Register a decoder; call repeatedly to register multiple decoders. |df| is
-  // a decoder factory that returns an iSAC decoder; it will be called once if
-  // the decoder being registered is iSAC.
-  virtual int RegisterReceiveCodec(
-      const CodecInst& receive_codec,
-      rtc::FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory) = 0;
-
   // Registers an external decoder. The name is only used to provide information
   // back to the caller about the decoder. Hence, the name is arbitrary, and may
   // be empty.
diff --git a/modules/audio_coding/include/audio_coding_module_typedefs.h b/modules/audio_coding/include/audio_coding_module_typedefs.h
index cd4351b..bafff72 100644
--- a/modules/audio_coding/include/audio_coding_module_typedefs.h
+++ b/modules/audio_coding/include/audio_coding_module_typedefs.h
@@ -13,6 +13,8 @@
 
 #include <map>
 
+#include "rtc_base/deprecation.h"
+
 namespace webrtc {
 
 ///////////////////////////////////////////////////////////////////////////
@@ -43,6 +45,84 @@
   kAudio = 1,
 };
 
+// Statistics for calls to AudioCodingModule::PlayoutData10Ms().
+struct AudioDecodingCallStats {
+  AudioDecodingCallStats()
+      : calls_to_silence_generator(0),
+        calls_to_neteq(0),
+        decoded_normal(0),
+        decoded_plc(0),
+        decoded_cng(0),
+        decoded_plc_cng(0),
+        decoded_muted_output(0) {}
+
+  int calls_to_silence_generator;  // Number of calls where silence generated,
+                                   // and NetEq was disengaged from decoding.
+  int calls_to_neteq;              // Number of calls to NetEq.
+  int decoded_normal;  // Number of calls where audio RTP packet decoded.
+  int decoded_plc;     // Number of calls resulted in PLC.
+  int decoded_cng;  // Number of calls where comfort noise generated due to DTX.
+  int decoded_plc_cng;       // Number of calls resulted where PLC faded to CNG.
+  int decoded_muted_output;  // Number of calls returning a muted state output.
+};
+
+// NETEQ statistics.
+struct NetworkStatistics {
+  // current jitter buffer size in ms
+  uint16_t currentBufferSize;
+  // preferred (optimal) buffer size in ms
+  uint16_t preferredBufferSize;
+  // adding extra delay due to "peaky jitter"
+  bool jitterPeaksFound;
+  // Stats below correspond to similarly-named fields in the WebRTC stats spec.
+  // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats
+  uint64_t totalSamplesReceived;
+  uint64_t concealedSamples;
+  uint64_t concealmentEvents;
+  uint64_t jitterBufferDelayMs;
+  // Stats below DO NOT correspond directly to anything in the WebRTC stats
+  // Loss rate (network + late); fraction between 0 and 1, scaled to Q14.
+  uint16_t currentPacketLossRate;
+  // Late loss rate; fraction between 0 and 1, scaled to Q14.
+  union {
+    RTC_DEPRECATED uint16_t currentDiscardRate;
+  };
+  // fraction (of original stream) of synthesized audio inserted through
+  // expansion (in Q14)
+  uint16_t currentExpandRate;
+  // fraction (of original stream) of synthesized speech inserted through
+  // expansion (in Q14)
+  uint16_t currentSpeechExpandRate;
+  // fraction of synthesized speech inserted through pre-emptive expansion
+  // (in Q14)
+  uint16_t currentPreemptiveRate;
+  // fraction of data removed through acceleration (in Q14)
+  uint16_t currentAccelerateRate;
+  // fraction of data coming from secondary decoding (in Q14)
+  uint16_t currentSecondaryDecodedRate;
+  // Fraction of secondary data, including FEC and RED, that is discarded (in
+  // Q14). Discarding of secondary data can be caused by the reception of the
+  // primary data, obsoleting the secondary data. It can also be caused by early
+  // or late arrival of secondary data.
+  uint16_t currentSecondaryDiscardedRate;
+  // clock-drift in parts-per-million (negative or positive)
+  int32_t clockDriftPPM;
+  // average packet waiting time in the jitter buffer (ms)
+  int meanWaitingTimeMs;
+  // median packet waiting time in the jitter buffer (ms)
+  int medianWaitingTimeMs;
+  // min packet waiting time in the jitter buffer (ms)
+  int minWaitingTimeMs;
+  // max packet waiting time in the jitter buffer (ms)
+  int maxWaitingTimeMs;
+  // added samples in off mode due to packet loss
+  size_t addedSamples;
+  // count of the number of buffer flushes
+  uint64_t packetBufferFlushes;
+  // number of samples expanded due to delayed packets
+  uint64_t delayedPacketOutageSamples;
+};
+
 }  // namespace webrtc
 
 #endif  // MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_TYPEDEFS_H_
diff --git a/modules/audio_coding/neteq/decision_logic_unittest.cc b/modules/audio_coding/neteq/decision_logic_unittest.cc
index 08720d1..183b9c7 100644
--- a/modules/audio_coding/neteq/decision_logic_unittest.cc
+++ b/modules/audio_coding/neteq/decision_logic_unittest.cc
@@ -31,7 +31,7 @@
   TickTimer tick_timer;
   PacketBuffer packet_buffer(10, &tick_timer);
   DelayPeakDetector delay_peak_detector(&tick_timer);
-  DelayManager delay_manager(240, &delay_peak_detector, &tick_timer);
+  DelayManager delay_manager(240, 0, &delay_peak_detector, &tick_timer);
   BufferLevelFilter buffer_level_filter;
   DecisionLogic* logic = DecisionLogic::Create(
       fs_hz, output_size_samples, false, &decoder_database, packet_buffer,
@@ -48,7 +48,7 @@
   TickTimer tick_timer;
   PacketBuffer packet_buffer(10, &tick_timer);
   DelayPeakDetector delay_peak_detector(&tick_timer);
-  DelayManager delay_manager(240, &delay_peak_detector, &tick_timer);
+  DelayManager delay_manager(240, 0, &delay_peak_detector, &tick_timer);
   BufferLevelFilter buffer_level_filter;
   {
     test::ScopedFieldTrials field_trial(
diff --git a/modules/audio_coding/neteq/delay_manager.cc b/modules/audio_coding/neteq/delay_manager.cc
index 628812a..67e6a13 100644
--- a/modules/audio_coding/neteq/delay_manager.cc
+++ b/modules/audio_coding/neteq/delay_manager.cc
@@ -62,6 +62,7 @@
 namespace webrtc {
 
 DelayManager::DelayManager(size_t max_packets_in_buffer,
+                           int base_min_target_delay_ms,
                            DelayPeakDetector* peak_detector,
                            const TickTimer* tick_timer)
     : first_packet_received_(false),
@@ -69,13 +70,14 @@
       iat_vector_(kMaxIat + 1, 0),
       iat_factor_(0),
       tick_timer_(tick_timer),
+      base_min_target_delay_ms_(base_min_target_delay_ms),
       base_target_level_(4),                   // In Q0 domain.
       target_level_(base_target_level_ << 8),  // In Q8 domain.
       packet_len_ms_(0),
       streaming_mode_(false),
       last_seq_no_(0),
       last_timestamp_(0),
-      minimum_delay_ms_(0),
+      minimum_delay_ms_(base_min_target_delay_ms_),
       maximum_delay_ms_(target_level_),
       iat_cumulative_sum_(0),
       max_iat_cumulative_sum_(0),
@@ -85,6 +87,8 @@
           field_trial::IsEnabled("WebRTC-Audio-NetEqFramelengthExperiment")),
       forced_limit_probability_(GetForcedLimitProbability()) {
   assert(peak_detector);  // Should never be NULL.
+  RTC_DCHECK_GE(base_min_target_delay_ms_, 0);
+  RTC_DCHECK_LE(minimum_delay_ms_, maximum_delay_ms_);
 
   Reset();
 }
@@ -485,7 +489,7 @@
            static_cast<int>(3 * max_packets_in_buffer_ * packet_len_ms_ / 4))) {
     return false;
   }
-  minimum_delay_ms_ = delay_ms;
+  minimum_delay_ms_ = std::max(delay_ms, base_min_target_delay_ms_);
   return true;
 }
 
diff --git a/modules/audio_coding/neteq/delay_manager.h b/modules/audio_coding/neteq/delay_manager.h
index cd5fc09..2c8081b 100644
--- a/modules/audio_coding/neteq/delay_manager.h
+++ b/modules/audio_coding/neteq/delay_manager.h
@@ -31,9 +31,11 @@
 
   // Create a DelayManager object. Notify the delay manager that the packet
   // buffer can hold no more than |max_packets_in_buffer| packets (i.e., this
-  // is the number of packet slots in the buffer). Supply a PeakDetector
-  // object to the DelayManager.
+  // is the number of packet slots in the buffer) and that the target delay
+  // should be greater than or equal to |base_min_target_delay_ms|. Supply a
+  // PeakDetector object to the DelayManager.
   DelayManager(size_t max_packets_in_buffer,
+               int base_min_target_delay_ms,
                DelayPeakDetector* peak_detector,
                const TickTimer* tick_timer);
 
@@ -144,6 +146,8 @@
   IATVector iat_vector_;                // Histogram of inter-arrival times.
   int iat_factor_;  // Forgetting factor for updating the IAT histogram (Q15).
   const TickTimer* tick_timer_;
+  const int base_min_target_delay_ms_;  // Lower bound for target_level_ and
+                                        // minimum_delay_ms_.
   // Time elapsed since last packet.
   std::unique_ptr<TickTimer::Stopwatch> packet_iat_stopwatch_;
   int base_target_level_;  // Currently preferred buffer level before peak
diff --git a/modules/audio_coding/neteq/delay_manager_unittest.cc b/modules/audio_coding/neteq/delay_manager_unittest.cc
index e4e865f..6281a15 100644
--- a/modules/audio_coding/neteq/delay_manager_unittest.cc
+++ b/modules/audio_coding/neteq/delay_manager_unittest.cc
@@ -27,6 +27,7 @@
 class DelayManagerTest : public ::testing::Test {
  protected:
   static const int kMaxNumberOfPackets = 240;
+  static const int kMinDelayMs = 0;
   static const int kTimeStepMs = 10;
   static const int kFs = 8000;
   static const int kFrameSizeMs = 20;
@@ -56,7 +57,8 @@
 
 void DelayManagerTest::RecreateDelayManager() {
   EXPECT_CALL(detector_, Reset()).Times(1);
-  dm_.reset(new DelayManager(kMaxNumberOfPackets, &detector_, &tick_timer_));
+  dm_.reset(new DelayManager(kMaxNumberOfPackets, kMinDelayMs, &detector_,
+                             &tick_timer_));
 }
 
 void DelayManagerTest::SetPacketAudioLength(int lengt_ms) {
diff --git a/modules/audio_coding/neteq/expand.cc b/modules/audio_coding/neteq/expand.cc
index 97ce529..4a06d09 100644
--- a/modules/audio_coding/neteq/expand.cc
+++ b/modules/audio_coding/neteq/expand.cc
@@ -323,8 +323,7 @@
   current_lag_index_ = 0;
   lag_index_direction_ = 0;
   stop_muting_ = true;  // Do not mute signal any more.
-  statistics_->LogDelayedPacketOutageEvent(
-      rtc::dchecked_cast<int>(expand_duration_samples_) / (fs_hz_ / 1000));
+  statistics_->LogDelayedPacketOutageEvent(expand_duration_samples_, fs_hz_);
 }
 
 void Expand::SetParametersForMergeAfterExpand() {
diff --git a/modules/audio_coding/neteq/expand_unittest.cc b/modules/audio_coding/neteq/expand_unittest.cc
index b4e6466..09914da 100644
--- a/modules/audio_coding/neteq/expand_unittest.cc
+++ b/modules/audio_coding/neteq/expand_unittest.cc
@@ -51,14 +51,16 @@
 namespace {
 class FakeStatisticsCalculator : public StatisticsCalculator {
  public:
-  void LogDelayedPacketOutageEvent(int outage_duration_ms) override {
-    last_outage_duration_ms_ = outage_duration_ms;
+  void LogDelayedPacketOutageEvent(int num_samples, int fs_hz) override {
+    last_outage_duration_samples_ = num_samples;
   }
 
-  int last_outage_duration_ms() const { return last_outage_duration_ms_; }
+  int last_outage_duration_samples() const {
+    return last_outage_duration_samples_;
+  }
 
  private:
-  int last_outage_duration_ms_ = 0;
+  int last_outage_duration_samples_ = 0;
 };
 
 // This is the same size that is given to the SyncBuffer object in NetEq.
@@ -120,13 +122,12 @@
     EXPECT_EQ(0, expand_.Process(&output));
     EXPECT_GT(output.Size(), 0u);
     sum_output_len_samples += output.Size();
-    EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+    EXPECT_EQ(0, statistics_.last_outage_duration_samples());
   }
   expand_.SetParametersForNormalAfterExpand();
   // Convert |sum_output_len_samples| to milliseconds.
-  EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples /
-                                   (test_sample_rate_hz_ / 1000)),
-            statistics_.last_outage_duration_ms());
+  EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples),
+            statistics_.last_outage_duration_samples());
 }
 
 // This test is similar to DelayedPacketOutage, but ends by calling
@@ -140,10 +141,10 @@
     EXPECT_EQ(0, expand_.Process(&output));
     EXPECT_GT(output.Size(), 0u);
     sum_output_len_samples += output.Size();
-    EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+    EXPECT_EQ(0, statistics_.last_outage_duration_samples());
   }
   expand_.SetParametersForMergeAfterExpand();
-  EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+  EXPECT_EQ(0, statistics_.last_outage_duration_samples());
 }
 
 // This test is similar to the DelayedPacketOutage test above, but with the
@@ -161,13 +162,12 @@
       expand_.Reset();
       sum_output_len_samples = 0;
     }
-    EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+    EXPECT_EQ(0, statistics_.last_outage_duration_samples());
   }
   expand_.SetParametersForNormalAfterExpand();
   // Convert |sum_output_len_samples| to milliseconds.
-  EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples /
-                                   (test_sample_rate_hz_ / 1000)),
-            statistics_.last_outage_duration_ms());
+  EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples),
+            statistics_.last_outage_duration_samples());
 }
 
 namespace {
diff --git a/modules/audio_coding/neteq/include/neteq.h b/modules/audio_coding/neteq/include/neteq.h
index 530975f..2820fd8 100644
--- a/modules/audio_coding/neteq/include/neteq.h
+++ b/modules/audio_coding/neteq/include/neteq.h
@@ -72,6 +72,7 @@
   uint64_t jitter_buffer_delay_ms = 0;
   // Below stat is not part of the spec.
   uint64_t voice_concealed_samples = 0;
+  uint64_t delayed_packet_outage_samples = 0;
 };
 
 // Metrics that describe the operations performed in NetEq, and the internal
@@ -112,6 +113,7 @@
     bool enable_post_decode_vad = false;
     size_t max_packets_in_buffer = 50;
     int max_delay_ms = 2000;
+    int min_delay_ms = 0;
     bool enable_fast_accelerate = false;
     bool enable_muted_state = false;
     absl::optional<AudioCodecPairId> codec_pair_id;
@@ -231,13 +233,6 @@
   // statistics are never reset.
   virtual NetEqOperationsAndState GetOperationsAndState() const = 0;
 
-  // Writes the current RTCP statistics to |stats|. The statistics are reset
-  // and a new report period is started with the call.
-  virtual void GetRtcpStatistics(RtcpStatistics* stats) = 0;
-
-  // Same as RtcpStatistics(), but does not reset anything.
-  virtual void GetRtcpStatisticsNoReset(RtcpStatistics* stats) = 0;
-
   // Enables post-decode VAD. When enabled, GetAudio() will return
   // kOutputVADPassive when the signal contains no speech.
   virtual void EnableVad() = 0;
@@ -266,10 +261,6 @@
   // Flushes both the packet buffer and the sync buffer.
   virtual void FlushBuffers() = 0;
 
-  // Current usage of packet-buffer and it's limits.
-  virtual void PacketBufferStatistics(int* current_num_packets,
-                                      int* max_num_packets) const = 0;
-
   // Enables NACK and sets the maximum size of the NACK list, which should be
   // positive and no larger than Nack::kNackListSizeLimit. If NACK is already
   // enabled then the maximum NACK list size is modified accordingly.
diff --git a/modules/audio_coding/neteq/mock/mock_delay_manager.h b/modules/audio_coding/neteq/mock/mock_delay_manager.h
index 9b2ed49..206cea7 100644
--- a/modules/audio_coding/neteq/mock/mock_delay_manager.h
+++ b/modules/audio_coding/neteq/mock/mock_delay_manager.h
@@ -20,9 +20,13 @@
 class MockDelayManager : public DelayManager {
  public:
   MockDelayManager(size_t max_packets_in_buffer,
+                   int base_min_target_delay_ms,
                    DelayPeakDetector* peak_detector,
                    const TickTimer* tick_timer)
-      : DelayManager(max_packets_in_buffer, peak_detector, tick_timer) {}
+      : DelayManager(max_packets_in_buffer,
+                     base_min_target_delay_ms,
+                     peak_detector,
+                     tick_timer) {}
   virtual ~MockDelayManager() { Die(); }
   MOCK_METHOD0(Die, void());
   MOCK_CONST_METHOD0(iat_vector, const IATVector&());
diff --git a/modules/audio_coding/neteq/neteq_impl.cc b/modules/audio_coding/neteq/neteq_impl.cc
index 6a2cbae..2a025f3 100644
--- a/modules/audio_coding/neteq/neteq_impl.cc
+++ b/modules/audio_coding/neteq/neteq_impl.cc
@@ -63,6 +63,7 @@
           new DecoderDatabase(decoder_factory, config.codec_pair_id)),
       delay_peak_detector(new DelayPeakDetector(tick_timer.get())),
       delay_manager(new DelayManager(config.max_packets_in_buffer,
+                                     config.min_delay_ms,
                                      delay_peak_detector.get(),
                                      tick_timer.get())),
       dtmf_buffer(new DtmfBuffer(config.sample_rate_hz)),
@@ -387,20 +388,6 @@
   return result;
 }
 
-void NetEqImpl::GetRtcpStatistics(RtcpStatistics* stats) {
-  rtc::CritScope lock(&crit_sect_);
-  if (stats) {
-    rtcp_.GetStatistics(false, stats);
-  }
-}
-
-void NetEqImpl::GetRtcpStatisticsNoReset(RtcpStatistics* stats) {
-  rtc::CritScope lock(&crit_sect_);
-  if (stats) {
-    rtcp_.GetStatistics(true, stats);
-  }
-}
-
 void NetEqImpl::EnableVad() {
   rtc::CritScope lock(&crit_sect_);
   assert(vad_.get());
@@ -475,12 +462,6 @@
   first_packet_ = true;
 }
 
-void NetEqImpl::PacketBufferStatistics(int* current_num_packets,
-                                       int* max_num_packets) const {
-  rtc::CritScope lock(&crit_sect_);
-  packet_buffer_->BufferStat(current_num_packets, max_num_packets);
-}
-
 void NetEqImpl::EnableNack(size_t max_nack_list_size) {
   rtc::CritScope lock(&crit_sect_);
   if (!nack_enabled_) {
@@ -576,8 +557,6 @@
     // Note: |first_packet_| will be cleared further down in this method, once
     // the packet has been successfully inserted into the packet buffer.
 
-    rtcp_.Init(rtp_header.sequenceNumber);
-
     // Flush the packet buffer and DTMF buffer.
     packet_buffer_->Flush();
     dtmf_buffer_->Flush();
@@ -592,9 +571,6 @@
     timestamp_ = main_timestamp;
   }
 
-  // Update RTCP statistics, only for regular packets.
-  rtcp_.Update(rtp_header, receive_timestamp);
-
   if (nack_enabled_) {
     RTC_DCHECK(nack_);
     if (update_sample_rate_and_channels) {
diff --git a/modules/audio_coding/neteq/neteq_impl.h b/modules/audio_coding/neteq/neteq_impl.h
index 36990fb..525ae61 100644
--- a/modules/audio_coding/neteq/neteq_impl.h
+++ b/modules/audio_coding/neteq/neteq_impl.h
@@ -22,7 +22,6 @@
 #include "modules/audio_coding/neteq/include/neteq.h"
 #include "modules/audio_coding/neteq/packet.h"
 #include "modules/audio_coding/neteq/random_vector.h"
-#include "modules/audio_coding/neteq/rtcp.h"
 #include "modules/audio_coding/neteq/statistics_calculator.h"
 #include "modules/audio_coding/neteq/tick_timer.h"
 #include "rtc_base/constructormagic.h"
@@ -170,17 +169,10 @@
   // after the call.
   int NetworkStatistics(NetEqNetworkStatistics* stats) override;
 
-  // Writes the current RTCP statistics to |stats|. The statistics are reset
-  // and a new report period is started with the call.
-  void GetRtcpStatistics(RtcpStatistics* stats) override;
-
   NetEqLifetimeStatistics GetLifetimeStatistics() const override;
 
   NetEqOperationsAndState GetOperationsAndState() const override;
 
-  // Same as RtcpStatistics(), but does not reset anything.
-  void GetRtcpStatisticsNoReset(RtcpStatistics* stats) override;
-
   // Enables post-decode VAD. When enabled, GetAudio() will return
   // kOutputVADPassive when the signal contains no speech.
   void EnableVad() override;
@@ -200,9 +192,6 @@
   // Flushes both the packet buffer and the sync buffer.
   void FlushBuffers() override;
 
-  void PacketBufferStatistics(int* current_num_packets,
-                              int* max_num_packets) const override;
-
   void EnableNack(size_t max_nack_list_size) override;
 
   void DisableNack() override;
@@ -395,7 +384,6 @@
       RTC_GUARDED_BY(crit_sect_);
   RandomVector random_vector_ RTC_GUARDED_BY(crit_sect_);
   std::unique_ptr<ComfortNoise> comfort_noise_ RTC_GUARDED_BY(crit_sect_);
-  Rtcp rtcp_ RTC_GUARDED_BY(crit_sect_);
   StatisticsCalculator stats_ RTC_GUARDED_BY(crit_sect_);
   int fs_hz_ RTC_GUARDED_BY(crit_sect_);
   int fs_mult_ RTC_GUARDED_BY(crit_sect_);
diff --git a/modules/audio_coding/neteq/neteq_impl_unittest.cc b/modules/audio_coding/neteq/neteq_impl_unittest.cc
index b772dfa..0e087c8 100644
--- a/modules/audio_coding/neteq/neteq_impl_unittest.cc
+++ b/modules/audio_coding/neteq/neteq_impl_unittest.cc
@@ -92,7 +92,8 @@
 
     if (use_mock_delay_manager_) {
       std::unique_ptr<MockDelayManager> mock(new MockDelayManager(
-          config_.max_packets_in_buffer, delay_peak_detector_, tick_timer_));
+          config_.max_packets_in_buffer, config_.min_delay_ms,
+          delay_peak_detector_, tick_timer_));
       mock_delay_manager_ = mock.get();
       EXPECT_CALL(*mock_delay_manager_, set_streaming_mode(false)).Times(1);
       deps.delay_manager = std::move(mock);
diff --git a/modules/audio_coding/neteq/neteq_unittest.cc b/modules/audio_coding/neteq/neteq_unittest.cc
index 1c9b9e7..e8b5023 100644
--- a/modules/audio_coding/neteq/neteq_unittest.cc
+++ b/modules/audio_coding/neteq/neteq_unittest.cc
@@ -22,12 +22,12 @@
 
 #include "api/audio/audio_frame.h"
 #include "api/audio_codecs/builtin_audio_decoder_factory.h"
-#include "common_types.h"  // NOLINT(build/include)
 #include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
 #include "modules/audio_coding/neteq/tools/audio_loop.h"
 #include "modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
 #include "modules/audio_coding/neteq/tools/neteq_test.h"
 #include "modules/audio_coding/neteq/tools/rtp_file_source.h"
+#include "modules/rtp_rtcp/include/rtcp_statistics.h"
 #include "rtc_base/ignore_wundef.h"
 #include "rtc_base/messagedigest.h"
 #include "rtc_base/numerics/safe_conversions.h"
@@ -234,7 +234,14 @@
   buffer.resize(digest_->Size());
   digest_->Finish(&buffer[0], buffer.size());
   const std::string result = rtc::hex_encode(&buffer[0], digest_->Size());
-  EXPECT_EQ(checksum, result);
+  if (checksum.size() == result.size()) {
+    EXPECT_EQ(checksum, result);
+  } else {
+    // Check that result is one of the '|'-separated checksums.
+    EXPECT_NE(checksum.find(result), std::string::npos)
+        << result << " should be one of these:\n"
+        << checksum;
+  }
 }
 
 class NetEqDecodingTest : public ::testing::Test {
@@ -258,7 +265,6 @@
   void DecodeAndCompare(const std::string& rtp_file,
                         const std::string& output_checksum,
                         const std::string& network_stats_checksum,
-                        const std::string& rtcp_stats_checksum,
                         bool gen_ref);
 
   static void PopulateRtpInfo(int frame_index,
@@ -366,7 +372,6 @@
     const std::string& rtp_file,
     const std::string& output_checksum,
     const std::string& network_stats_checksum,
-    const std::string& rtcp_stats_checksum,
     bool gen_ref) {
   OpenInputFile(rtp_file);
 
@@ -378,10 +383,6 @@
       gen_ref ? webrtc::test::OutputPath() + "neteq_network_stats.dat" : "";
   ResultSink network_stats(stat_out_file);
 
-  std::string rtcp_out_file =
-      gen_ref ? webrtc::test::OutputPath() + "neteq_rtcp_stats.dat" : "";
-  ResultSink rtcp_stats(rtcp_out_file);
-
   packet_ = rtp_source_->NextPacket();
   int i = 0;
   uint64_t last_concealed_samples = 0;
@@ -418,11 +419,6 @@
       EXPECT_NEAR(
           (delta_concealed_samples << 14) / delta_total_samples_received,
           current_network_stats.expand_rate, (2 << 14) / 100.0);
-
-      // Process RTCPstat.
-      RtcpStatistics current_rtcp_stats;
-      neteq_->GetRtcpStatistics(&current_rtcp_stats);
-      ASSERT_NO_FATAL_FAILURE(rtcp_stats.AddResult(current_rtcp_stats));
     }
   }
 
@@ -430,8 +426,6 @@
   output.VerifyChecksum(output_checksum);
   SCOPED_TRACE("Check network stats.");
   network_stats.VerifyChecksum(network_stats_checksum);
-  SCOPED_TRACE("Check rtcp stats.");
-  rtcp_stats.VerifyChecksum(rtcp_stats_checksum);
 }
 
 void NetEqDecodingTest::PopulateRtpInfo(int frame_index,
@@ -481,14 +475,8 @@
                        "4b2370f5c794741d2a46be5c7935c66ef3fb53e9",
                        "4b2370f5c794741d2a46be5c7935c66ef3fb53e9");
 
-  const std::string rtcp_stats_checksum =
-      PlatformChecksum("b8880bf9fed2487efbddcb8d94b9937a29ae521d",
-                       "f3f7b3d3e71d7e635240b5373b57df6a7e4ce9d4", "not used",
-                       "b8880bf9fed2487efbddcb8d94b9937a29ae521d",
-                       "b8880bf9fed2487efbddcb8d94b9937a29ae521d");
-
   DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
-                   rtcp_stats_checksum, FLAG_gen_ref);
+                   FLAG_gen_ref);
 }
 
 #if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
@@ -501,12 +489,13 @@
   const std::string input_rtp_file =
       webrtc::test::ResourcePath("audio_coding/neteq_opus", "rtp");
 
-  const std::string output_checksum =
-      PlatformChecksum("14a63b3c7b925c82296be4bafc71bec85f2915c2",
-                       "b7b7ed802b0e18ee416973bf3b9ae98599b0181d",
-                       "5876e52dda90d5ca433c3726555b907b97c86374",
-                       "14a63b3c7b925c82296be4bafc71bec85f2915c2",
-                       "14a63b3c7b925c82296be4bafc71bec85f2915c2");
+  // Checksum depends on libopus being compiled with or without SSE.
+  const std::string maybe_sse =
+      "14a63b3c7b925c82296be4bafc71bec85f2915c2|"
+      "2c05677daa968d6c68b92adf4affb7cd9bb4d363";
+  const std::string output_checksum = PlatformChecksum(
+      maybe_sse, "b7b7ed802b0e18ee416973bf3b9ae98599b0181d",
+      "5876e52dda90d5ca433c3726555b907b97c86374", maybe_sse, maybe_sse);
 
   const std::string network_stats_checksum =
       PlatformChecksum("adb3272498e436d1c019cbfd71610e9510c54497",
@@ -515,15 +504,8 @@
                        "adb3272498e436d1c019cbfd71610e9510c54497",
                        "adb3272498e436d1c019cbfd71610e9510c54497");
 
-  const std::string rtcp_stats_checksum =
-      PlatformChecksum("e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
-                       "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
-                       "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
-                       "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
-                       "e37c797e3de6a64dda88c9ade7a013d022a2e1e0");
-
   DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
-                   rtcp_stats_checksum, FLAG_gen_ref);
+                   FLAG_gen_ref);
 }
 
 #if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
@@ -536,21 +518,18 @@
   const std::string input_rtp_file =
       webrtc::test::ResourcePath("audio_coding/neteq_opus_dtx", "rtp");
 
-  const std::string output_checksum =
-      PlatformChecksum("713af6c92881f5aab1285765ee6680da9d1c06ce",
-                       "3ec991b96872123f1554c03c543ca5d518431e46",
-                       "da9f9a2d94e0c2d67342fad4965d7b91cda50b25",
-                       "713af6c92881f5aab1285765ee6680da9d1c06ce",
-                       "713af6c92881f5aab1285765ee6680da9d1c06ce");
+  const std::string maybe_sse =
+      "713af6c92881f5aab1285765ee6680da9d1c06ce|"
+      "2ac10c4e79aeedd0df2863b079da5848b40f00b5";
+  const std::string output_checksum = PlatformChecksum(
+      maybe_sse, "3ec991b96872123f1554c03c543ca5d518431e46",
+      "da9f9a2d94e0c2d67342fad4965d7b91cda50b25", maybe_sse, maybe_sse);
 
   const std::string network_stats_checksum =
       "bab58dc587d956f326056d7340c96eb9d2d3cc21";
 
-  const std::string rtcp_stats_checksum =
-      "ac27a7f305efb58b39bf123dccee25dee5758e63";
-
   DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
-                   rtcp_stats_checksum, FLAG_gen_ref);
+                   FLAG_gen_ref);
 }
 
 // Use fax mode to avoid time-scaling. This is to simplify the testing of
diff --git a/modules/audio_coding/neteq/packet_buffer.cc b/modules/audio_coding/neteq/packet_buffer.cc
index 7b70dee..343763b 100644
--- a/modules/audio_coding/neteq/packet_buffer.cc
+++ b/modules/audio_coding/neteq/packet_buffer.cc
@@ -299,9 +299,4 @@
   return false;
 }
 
-void PacketBuffer::BufferStat(int* num_packets, int* max_num_packets) const {
-  *num_packets = static_cast<int>(buffer_.size());
-  *max_num_packets = static_cast<int>(max_number_of_packets_);
-}
-
 }  // namespace webrtc
diff --git a/modules/audio_coding/neteq/packet_buffer.h b/modules/audio_coding/neteq/packet_buffer.h
index 269b957..0f5cd7f 100644
--- a/modules/audio_coding/neteq/packet_buffer.h
+++ b/modules/audio_coding/neteq/packet_buffer.h
@@ -125,8 +125,6 @@
   virtual bool ContainsDtxOrCngPacket(
       const DecoderDatabase* decoder_database) const;
 
-  virtual void BufferStat(int* num_packets, int* max_num_packets) const;
-
   // Static method returning true if |timestamp| is older than |timestamp_limit|
   // but less than |horizon_samples| behind |timestamp_limit|. For instance,
   // with timestamp_limit = 100 and horizon_samples = 10, a timestamp in the
diff --git a/modules/audio_coding/neteq/rtcp.cc b/modules/audio_coding/neteq/rtcp.cc
deleted file mode 100644
index 6519337..0000000
--- a/modules/audio_coding/neteq/rtcp.cc
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "modules/audio_coding/neteq/rtcp.h"
-
-#include <algorithm>
-#include <cstdlib>
-
-#include "api/rtp_headers.h"
-#include "common_types.h"
-#include "rtc_base/checks.h"
-
-namespace webrtc {
-
-void Rtcp::Init(uint16_t start_sequence_number) {
-  cycles_ = 0;
-  max_seq_no_ = start_sequence_number;
-  base_seq_no_ = start_sequence_number;
-  received_packets_ = 0;
-  received_packets_prior_ = 0;
-  expected_prior_ = 0;
-  jitter_ = 0;
-  transit_ = 0;
-}
-
-void Rtcp::Update(const RTPHeader& rtp_header, uint32_t receive_timestamp) {
-  // Update number of received packets, and largest packet number received.
-  received_packets_++;
-  int16_t sn_diff = rtp_header.sequenceNumber - max_seq_no_;
-  if (sn_diff >= 0) {
-    if (rtp_header.sequenceNumber < max_seq_no_) {
-      // Wrap-around detected.
-      cycles_++;
-    }
-    max_seq_no_ = rtp_header.sequenceNumber;
-  }
-
-  // Calculate jitter according to RFC 3550, and update previous timestamps.
-  // Note that the value in |jitter_| is in Q4.
-  if (received_packets_ > 1) {
-    int32_t ts_diff = receive_timestamp - (rtp_header.timestamp - transit_);
-    int64_t jitter_diff = (std::abs(int64_t{ts_diff}) << 4) - jitter_;
-    // Calculate 15 * jitter_ / 16 + jitter_diff / 16 (with proper rounding).
-    jitter_ = jitter_ + ((jitter_diff + 8) >> 4);
-    RTC_DCHECK_GE(jitter_, 0);
-  }
-  transit_ = rtp_header.timestamp - receive_timestamp;
-}
-
-void Rtcp::GetStatistics(bool no_reset, RtcpStatistics* stats) {
-  // Extended highest sequence number received.
-  stats->extended_highest_sequence_number =
-      (static_cast<int>(cycles_) << 16) + max_seq_no_;
-
-  // Calculate expected number of packets and compare it with the number of
-  // packets that were actually received. The cumulative number of lost packets
-  // can be extracted.
-  uint32_t expected_packets =
-      stats->extended_highest_sequence_number - base_seq_no_ + 1;
-  if (received_packets_ == 0) {
-    // No packets received, assume none lost.
-    stats->packets_lost = 0;
-  } else if (expected_packets > received_packets_) {
-    stats->packets_lost = expected_packets - received_packets_;
-    if (stats->packets_lost > 0xFFFFFF) {
-      stats->packets_lost = 0xFFFFFF;
-    }
-  } else {
-    stats->packets_lost = 0;
-  }
-
-  // Fraction lost since last report.
-  uint32_t expected_since_last = expected_packets - expected_prior_;
-  uint32_t received_since_last = received_packets_ - received_packets_prior_;
-  if (!no_reset) {
-    expected_prior_ = expected_packets;
-    received_packets_prior_ = received_packets_;
-  }
-  int32_t lost = expected_since_last - received_since_last;
-  if (expected_since_last == 0 || lost <= 0 || received_packets_ == 0) {
-    stats->fraction_lost = 0;
-  } else {
-    stats->fraction_lost = std::min(0xFFU, (lost << 8) / expected_since_last);
-  }
-
-  stats->jitter = jitter_ >> 4;  // Scaling from Q4.
-}
-
-}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/rtcp.h b/modules/audio_coding/neteq/rtcp.h
deleted file mode 100644
index 60c2673..0000000
--- a/modules/audio_coding/neteq/rtcp.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_AUDIO_CODING_NETEQ_RTCP_H_
-#define MODULES_AUDIO_CODING_NETEQ_RTCP_H_
-
-#include <stdint.h>
-
-#include "rtc_base/constructormagic.h"
-
-namespace webrtc {
-
-struct RtcpStatistics;
-struct RTPHeader;
-
-class Rtcp {
- public:
-  Rtcp() { Init(0); }
-
-  ~Rtcp() {}
-
-  // Resets the RTCP statistics, and sets the first received sequence number.
-  void Init(uint16_t start_sequence_number);
-
-  // Updates the RTCP statistics with a new received packet.
-  void Update(const RTPHeader& rtp_header, uint32_t receive_timestamp);
-
-  // Returns the current RTCP statistics. If |no_reset| is true, the statistics
-  // are not reset, otherwise they are.
-  void GetStatistics(bool no_reset, RtcpStatistics* stats);
-
- private:
-  uint16_t cycles_;       // The number of wrap-arounds for the sequence number.
-  uint16_t max_seq_no_;   // The maximum sequence number received. Starts over
-                          // from 0 after wrap-around.
-  uint16_t base_seq_no_;  // The sequence number of the first received packet.
-  uint32_t received_packets_;  // The number of packets that have been received.
-  uint32_t received_packets_prior_;  // Number of packets received when last
-                                     // report was generated.
-  uint32_t expected_prior_;  // Expected number of packets, at the time of the
-                             // last report.
-  int64_t jitter_;           // Current jitter value in Q4.
-  int32_t transit_;          // Clock difference for previous packet.
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(Rtcp);
-};
-
-}  // namespace webrtc
-#endif  // MODULES_AUDIO_CODING_NETEQ_RTCP_H_
diff --git a/modules/audio_coding/neteq/statistics_calculator.cc b/modules/audio_coding/neteq/statistics_calculator.cc
index 807d7ee..50521fb 100644
--- a/modules/audio_coding/neteq/statistics_calculator.cc
+++ b/modules/audio_coding/neteq/statistics_calculator.cc
@@ -257,11 +257,14 @@
   buffer_full_counter_.RegisterSample();
 }
 
-void StatisticsCalculator::LogDelayedPacketOutageEvent(int outage_duration_ms) {
+void StatisticsCalculator::LogDelayedPacketOutageEvent(int num_samples,
+                                                       int fs_hz) {
+  int outage_duration_ms = num_samples / (fs_hz / 1000);
   RTC_HISTOGRAM_COUNTS("WebRTC.Audio.DelayedPacketOutageEventMs",
                        outage_duration_ms, 1 /* min */, 2000 /* max */,
                        100 /* bucket count */);
   delayed_packet_outage_counter_.RegisterSample();
+  lifetime_stats_.delayed_packet_outage_samples += num_samples;
 }
 
 void StatisticsCalculator::StoreWaitingTime(int waiting_time_ms) {
diff --git a/modules/audio_coding/neteq/statistics_calculator.h b/modules/audio_coding/neteq/statistics_calculator.h
index 6a5f7f4..49b74a0 100644
--- a/modules/audio_coding/neteq/statistics_calculator.h
+++ b/modules/audio_coding/neteq/statistics_calculator.h
@@ -86,10 +86,10 @@
   // Rerport that the packet buffer was flushed.
   void FlushedPacketBuffer();
 
-  // Logs a delayed packet outage event of |outage_duration_ms|. A delayed
-  // packet outage event is defined as an expand period caused not by an actual
-  // packet loss, but by a delayed packet.
-  virtual void LogDelayedPacketOutageEvent(int outage_duration_ms);
+  // Logs a delayed packet outage event of |num_samples| expanded at a sample
+  // rate of |fs_hz|. A delayed packet outage event is defined as an expand
+  // period caused not by an actual packet loss, but by a delayed packet.
+  virtual void LogDelayedPacketOutageEvent(int num_samples, int fs_hz);
 
   // Returns the current network statistics in |stats|. The current sample rate
   // is |fs_hz|, the total number of samples in packet buffer and sync buffer
diff --git a/modules/audio_processing/BUILD.gn b/modules/audio_processing/BUILD.gn
index a244c84..66b07df 100644
--- a/modules/audio_processing/BUILD.gn
+++ b/modules/audio_processing/BUILD.gn
@@ -329,6 +329,7 @@
   deps = [
     "../../api:array_view",
     "../../common_audio:common_audio",
+    "../../rtc_base:checks",
     "../../rtc_base:rtc_base_approved",
     "../../rtc_base:stringutils",
   ]
@@ -413,6 +414,7 @@
       "../../rtc_base:protobuf_utils",
       "../../rtc_base:rtc_base",
       "../../rtc_base:rtc_base_approved",
+      "../../rtc_base:rtc_base_tests_utils",
       "../../rtc_base:safe_minmax",
       "../../rtc_base/system:arch",
       "../../rtc_base/system:file_wrapper",
@@ -429,6 +431,7 @@
       "agc2:biquad_filter_unittests",
       "agc2:fixed_digital_unittests",
       "agc2:noise_estimator_unittests",
+      "agc2:rnn_vad_with_level_unittests",
       "agc2:test_utils",
       "agc2/rnn_vad:unittests",
       "test/conversational_speech:unittest",
@@ -457,7 +460,6 @@
         ":audioproc_unittest_proto",
         ":runtime_settings_protobuf_utils",
         "../../api/audio:audio_frame_api",
-        "../../rtc_base:rtc_base_tests_utils",
         "../../rtc_base:rtc_task_queue",
         "aec_dump",
         "aec_dump:aec_dump_unittests",
@@ -588,6 +590,7 @@
         "aec_dump:aec_dump_impl",
         "//testing/gtest",
         "//third_party/abseil-cpp/absl/memory",
+        "//third_party/abseil-cpp/absl/strings",
         "//third_party/abseil-cpp/absl/types:optional",
       ]
     }  # audioproc_f_impl
diff --git a/modules/audio_processing/aec3/BUILD.gn b/modules/audio_processing/aec3/BUILD.gn
index c3f6dd5..189bcfd 100644
--- a/modules/audio_processing/aec3/BUILD.gn
+++ b/modules/audio_processing/aec3/BUILD.gn
@@ -20,6 +20,8 @@
     "aec3_fft.h",
     "aec_state.cc",
     "aec_state.h",
+    "api_call_jitter_metrics.cc",
+    "api_call_jitter_metrics.h",
     "block_delay_buffer.cc",
     "block_delay_buffer.h",
     "block_framer.cc",
@@ -31,6 +33,8 @@
     "block_processor_metrics.h",
     "cascaded_biquad_filter.cc",
     "cascaded_biquad_filter.h",
+    "clockdrift_detector.cc",
+    "clockdrift_detector.h",
     "comfort_noise_generator.cc",
     "comfort_noise_generator.h",
     "decimator.cc",
@@ -101,6 +105,8 @@
     "reverb_model_fallback.h",
     "shadow_filter_update_gain.cc",
     "shadow_filter_update_gain.h",
+    "signal_dependent_erle_estimator.cc",
+    "signal_dependent_erle_estimator.h",
     "skew_estimator.cc",
     "skew_estimator.h",
     "stationarity_estimator.cc",
@@ -188,11 +194,13 @@
         "adaptive_fir_filter_unittest.cc",
         "aec3_fft_unittest.cc",
         "aec_state_unittest.cc",
+        "api_call_jitter_metrics_unittest.cc",
         "block_delay_buffer_unittest.cc",
         "block_framer_unittest.cc",
         "block_processor_metrics_unittest.cc",
         "block_processor_unittest.cc",
         "cascaded_biquad_filter_unittest.cc",
+        "clockdrift_detector_unittest.cc",
         "comfort_noise_generator_unittest.cc",
         "decimator_unittest.cc",
         "echo_canceller3_unittest.cc",
@@ -216,6 +224,7 @@
         "residual_echo_estimator_unittest.cc",
         "reverb_model_estimator_unittest.cc",
         "shadow_filter_update_gain_unittest.cc",
+        "signal_dependent_erle_estimator_unittest.cc",
         "skew_estimator_unittest.cc",
         "subtractor_unittest.cc",
         "suppression_filter_unittest.cc",
diff --git a/modules/audio_processing/aec3/aec_state.cc b/modules/audio_processing/aec3/aec_state.cc
index 0eeb7eb..45b361f 100644
--- a/modules/audio_processing/aec3/aec_state.cc
+++ b/modules/audio_processing/aec3/aec_state.cc
@@ -91,10 +91,7 @@
       legacy_filter_quality_state_(config_),
       legacy_saturation_detector_(config_),
       erl_estimator_(2 * kNumBlocksPerSecond),
-      erle_estimator_(2 * kNumBlocksPerSecond,
-                      config_.erle.min,
-                      config_.erle.max_l,
-                      config_.erle.max_h),
+      erle_estimator_(2 * kNumBlocksPerSecond, config_),
       suppression_gain_limiter_(config_),
       filter_analyzer_(config_),
       echo_audibility_(
@@ -154,8 +151,7 @@
   subtractor_output_analyzer_.Update(subtractor_output);
 
   // Analyze the properties of the filter.
-  filter_analyzer_.Update(adaptive_filter_impulse_response,
-                          adaptive_filter_frequency_response, render_buffer);
+  filter_analyzer_.Update(adaptive_filter_impulse_response, render_buffer);
 
   // Estimate the direct path delay of the filter.
   delay_state_.Update(filter_analyzer_, external_delay,
@@ -210,7 +206,8 @@
   const auto& X2_input_erle =
       enable_erle_updates_during_reverb_ ? X2_reverb : X2;
 
-  erle_estimator_.Update(X2_input_erle, Y2, E2_main,
+  erle_estimator_.Update(render_buffer, adaptive_filter_frequency_response,
+                         X2_input_erle, Y2, E2_main,
                          subtractor_output_analyzer_.ConvergedFilter(),
                          config_.erle.onset_detection);
 
diff --git a/modules/audio_processing/aec3/api_call_jitter_metrics.cc b/modules/audio_processing/aec3/api_call_jitter_metrics.cc
new file mode 100644
index 0000000..45f56a5
--- /dev/null
+++ b/modules/audio_processing/aec3/api_call_jitter_metrics.cc
@@ -0,0 +1,121 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/api_call_jitter_metrics.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+
+bool TimeToReportMetrics(int frames_since_last_report) {
+  constexpr int kNumFramesPerSecond = 100;
+  constexpr int kReportingIntervalFrames = 10 * kNumFramesPerSecond;
+  return frames_since_last_report == kReportingIntervalFrames;
+}
+
+}  // namespace
+
+ApiCallJitterMetrics::Jitter::Jitter()
+    : max_(0), min_(std::numeric_limits<int>::max()) {}
+
+void ApiCallJitterMetrics::Jitter::Update(int num_api_calls_in_a_row) {
+  min_ = std::min(min_, num_api_calls_in_a_row);
+  max_ = std::max(max_, num_api_calls_in_a_row);
+}
+
+void ApiCallJitterMetrics::Jitter::Reset() {
+  min_ = std::numeric_limits<int>::max();
+  max_ = 0;
+}
+
+void ApiCallJitterMetrics::Reset() {
+  render_jitter_.Reset();
+  capture_jitter_.Reset();
+  num_api_calls_in_a_row_ = 0;
+  frames_since_last_report_ = 0;
+  last_call_was_render_ = false;
+  proper_call_observed_ = false;
+}
+
+void ApiCallJitterMetrics::ReportRenderCall() {
+  if (!last_call_was_render_) {
+    // If the previous call was a capture and a proper call has been observed
+    // (containing both render and capture data), store the last number of
+    // capture calls into the metrics.
+    if (proper_call_observed_) {
+      capture_jitter_.Update(num_api_calls_in_a_row_);
+    }
+
+    // Reset the call counter to start counting render calls.
+    num_api_calls_in_a_row_ = 0;
+  }
+  ++num_api_calls_in_a_row_;
+  last_call_was_render_ = true;
+}
+
+void ApiCallJitterMetrics::ReportCaptureCall() {
+  if (last_call_was_render_) {
+    // If the previous call was a render and a proper call has been observed
+    // (containing both render and capture data), store the last number of
+    // render calls into the metrics.
+    if (proper_call_observed_) {
+      render_jitter_.Update(num_api_calls_in_a_row_);
+    }
+    // Reset the call counter to start counting capture calls.
+    num_api_calls_in_a_row_ = 0;
+
+    // If this statement is reached, at least one render and one capture call
+    // have been observed.
+    proper_call_observed_ = true;
+  }
+  ++num_api_calls_in_a_row_;
+  last_call_was_render_ = false;
+
+  // Only report and update jitter metrics when a proper call, containing
+  // both render and capture data, has been observed.
+  if (proper_call_observed_ &&
+      TimeToReportMetrics(++frames_since_last_report_)) {
+    // Report jitter, where the basic unit is frames.
+    constexpr int kMaxJitterToReport = 50;
+
+    // Report max and min jitter for render and capture, in units of 20 ms.
+    RTC_HISTOGRAM_COUNTS_LINEAR(
+        "WebRTC.Audio.EchoCanceller.MaxRenderJitter",
+        std::min(kMaxJitterToReport, render_jitter().max()), 1,
+        kMaxJitterToReport, kMaxJitterToReport);
+    RTC_HISTOGRAM_COUNTS_LINEAR(
+        "WebRTC.Audio.EchoCanceller.MinRenderJitter",
+        std::min(kMaxJitterToReport, render_jitter().min()), 1,
+        kMaxJitterToReport, kMaxJitterToReport);
+
+    RTC_HISTOGRAM_COUNTS_LINEAR(
+        "WebRTC.Audio.EchoCanceller.MaxCaptureJitter",
+        std::min(kMaxJitterToReport, capture_jitter().max()), 1,
+        kMaxJitterToReport, kMaxJitterToReport);
+    RTC_HISTOGRAM_COUNTS_LINEAR(
+        "WebRTC.Audio.EchoCanceller.MinCaptureJitter",
+        std::min(kMaxJitterToReport, capture_jitter().min()), 1,
+        kMaxJitterToReport, kMaxJitterToReport);
+
+    frames_since_last_report_ = 0;
+    Reset();
+  }
+}
+
+bool ApiCallJitterMetrics::WillReportMetricsAtNextCapture() const {
+  return TimeToReportMetrics(frames_since_last_report_ + 1);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/api_call_jitter_metrics.h b/modules/audio_processing/aec3/api_call_jitter_metrics.h
new file mode 100644
index 0000000..dd1fa82
--- /dev/null
+++ b/modules/audio_processing/aec3/api_call_jitter_metrics.h
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_API_CALL_JITTER_METRICS_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_API_CALL_JITTER_METRICS_H_
+
+namespace webrtc {
+
+// Stores data for reporting metrics on the API call jitter.
+class ApiCallJitterMetrics {
+ public:
+  class Jitter {
+   public:
+    Jitter();
+    void Update(int num_api_calls_in_a_row);
+    void Reset();
+
+    int min() const { return min_; }
+    int max() const { return max_; }
+
+   private:
+    int max_;
+    int min_;
+  };
+
+  ApiCallJitterMetrics() { Reset(); }
+
+  // Update metrics for render API call.
+  void ReportRenderCall();
+
+  // Update and periodically report metrics for capture API call.
+  void ReportCaptureCall();
+
+  // Methods used only for testing.
+  const Jitter& render_jitter() const { return render_jitter_; }
+  const Jitter& capture_jitter() const { return capture_jitter_; }
+  bool WillReportMetricsAtNextCapture() const;
+
+ private:
+  void Reset();
+
+  Jitter render_jitter_;
+  Jitter capture_jitter_;
+
+  int num_api_calls_in_a_row_ = 0;
+  int frames_since_last_report_ = 0;
+  bool last_call_was_render_ = false;
+  bool proper_call_observed_ = false;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_API_CALL_JITTER_METRICS_H_
diff --git a/modules/audio_processing/aec3/api_call_jitter_metrics_unittest.cc b/modules/audio_processing/aec3/api_call_jitter_metrics_unittest.cc
new file mode 100644
index 0000000..86608aa
--- /dev/null
+++ b/modules/audio_processing/aec3/api_call_jitter_metrics_unittest.cc
@@ -0,0 +1,109 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/api_call_jitter_metrics.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Verify constant jitter.
+TEST(ApiCallJitterMetrics, ConstantJitter) {
+  for (int jitter = 1; jitter < 20; ++jitter) {
+    ApiCallJitterMetrics metrics;
+    for (size_t k = 0; k < 30 * kNumBlocksPerSecond; ++k) {
+      for (int j = 0; j < jitter; ++j) {
+        metrics.ReportRenderCall();
+      }
+
+      for (int j = 0; j < jitter; ++j) {
+        metrics.ReportCaptureCall();
+
+        if (metrics.WillReportMetricsAtNextCapture()) {
+          EXPECT_EQ(jitter, metrics.render_jitter().min());
+          EXPECT_EQ(jitter, metrics.render_jitter().max());
+          EXPECT_EQ(jitter, metrics.capture_jitter().min());
+          EXPECT_EQ(jitter, metrics.capture_jitter().max());
+        }
+      }
+    }
+  }
+}
+
+// Verify peaky jitter for the render.
+TEST(ApiCallJitterMetrics, JitterPeakRender) {
+  constexpr int kMinJitter = 2;
+  constexpr int kJitterPeak = 10;
+  constexpr int kPeakInterval = 100;
+
+  ApiCallJitterMetrics metrics;
+  int render_surplus = 0;
+
+  for (size_t k = 0; k < 30 * kNumBlocksPerSecond; ++k) {
+    const int num_render_calls =
+        k % kPeakInterval == 0 ? kJitterPeak : kMinJitter;
+    for (int j = 0; j < num_render_calls; ++j) {
+      metrics.ReportRenderCall();
+      ++render_surplus;
+    }
+
+    ASSERT_LE(kMinJitter, render_surplus);
+    const int num_capture_calls =
+        render_surplus == kMinJitter ? kMinJitter : kMinJitter + 1;
+    for (int j = 0; j < num_capture_calls; ++j) {
+      metrics.ReportCaptureCall();
+
+      if (metrics.WillReportMetricsAtNextCapture()) {
+        EXPECT_EQ(kMinJitter, metrics.render_jitter().min());
+        EXPECT_EQ(kJitterPeak, metrics.render_jitter().max());
+        EXPECT_EQ(kMinJitter, metrics.capture_jitter().min());
+        EXPECT_EQ(kMinJitter + 1, metrics.capture_jitter().max());
+      }
+      --render_surplus;
+    }
+  }
+}
+
+// Verify peaky jitter for the capture.
+TEST(ApiCallJitterMetrics, JitterPeakCapture) {
+  constexpr int kMinJitter = 2;
+  constexpr int kJitterPeak = 10;
+  constexpr int kPeakInterval = 100;
+
+  ApiCallJitterMetrics metrics;
+  int capture_surplus = kMinJitter;
+
+  for (size_t k = 0; k < 30 * kNumBlocksPerSecond; ++k) {
+    ASSERT_LE(kMinJitter, capture_surplus);
+    const int num_render_calls =
+        capture_surplus == kMinJitter ? kMinJitter : kMinJitter + 1;
+    for (int j = 0; j < num_render_calls; ++j) {
+      metrics.ReportRenderCall();
+      --capture_surplus;
+    }
+
+    const int num_capture_calls =
+        k % kPeakInterval == 0 ? kJitterPeak : kMinJitter;
+    for (int j = 0; j < num_capture_calls; ++j) {
+      metrics.ReportCaptureCall();
+
+      if (metrics.WillReportMetricsAtNextCapture()) {
+        EXPECT_EQ(kMinJitter, metrics.render_jitter().min());
+        EXPECT_EQ(kMinJitter + 1, metrics.render_jitter().max());
+        EXPECT_EQ(kMinJitter, metrics.capture_jitter().min());
+        EXPECT_EQ(kJitterPeak, metrics.capture_jitter().max());
+      }
+      ++capture_surplus;
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/block_processor.cc b/modules/audio_processing/aec3/block_processor.cc
index 590380f..ef25e7c 100644
--- a/modules/audio_processing/aec3/block_processor.cc
+++ b/modules/audio_processing/aec3/block_processor.cc
@@ -194,6 +194,8 @@
     }
   }
 
+  echo_path_variability.clock_drift = delay_controller_->HasClockdrift();
+
   // Remove the echo from the capture signal.
   echo_remover_->ProcessCapture(
       echo_path_variability, capture_signal_saturation, estimated_delay_,
diff --git a/modules/audio_processing/aec3/block_processor2.cc b/modules/audio_processing/aec3/block_processor2.cc
index 3616427..30bd3ee 100644
--- a/modules/audio_processing/aec3/block_processor2.cc
+++ b/modules/audio_processing/aec3/block_processor2.cc
@@ -166,6 +166,8 @@
     }
   }
 
+  echo_path_variability.clock_drift = delay_controller_->HasClockdrift();
+
   // Remove the echo from the capture signal.
   echo_remover_->ProcessCapture(
       echo_path_variability, capture_signal_saturation, estimated_delay_,
diff --git a/modules/audio_processing/aec3/clockdrift_detector.cc b/modules/audio_processing/aec3/clockdrift_detector.cc
new file mode 100644
index 0000000..2c49b79
--- /dev/null
+++ b/modules/audio_processing/aec3/clockdrift_detector.cc
@@ -0,0 +1,61 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/clockdrift_detector.h"
+
+namespace webrtc {
+
+ClockdriftDetector::ClockdriftDetector()
+    : level_(Level::kNone), stability_counter_(0) {
+  delay_history_.fill(0);
+}
+
+ClockdriftDetector::~ClockdriftDetector() = default;
+
+void ClockdriftDetector::Update(int delay_estimate) {
+  if (delay_estimate == delay_history_[0]) {
+    // Reset clockdrift level if delay estimate is stable for 7500 blocks (30
+    // seconds).
+    if (++stability_counter_ > 7500)
+      level_ = Level::kNone;
+    return;
+  }
+
+  stability_counter_ = 0;
+  const int d1 = delay_history_[0] - delay_estimate;
+  const int d2 = delay_history_[1] - delay_estimate;
+  const int d3 = delay_history_[2] - delay_estimate;
+
+  // Patterns recognized as positive clockdrift:
+  // [x-3], x-2, x-1, x.
+  // [x-3], x-1, x-2, x.
+  const bool probable_drift_up =
+      (d1 == -1 && d2 == -2) || (d1 == -2 && d2 == -1);
+  const bool drift_up = probable_drift_up && d3 == -3;
+
+  // Patterns recognized as negative clockdrift:
+  // [x+3], x+2, x+1, x.
+  // [x+3], x+1, x+2, x.
+  const bool probable_drift_down = (d1 == 1 && d2 == 2) || (d1 == 2 && d2 == 1);
+  const bool drift_down = probable_drift_down && d3 == 3;
+
+  // Set clockdrift level.
+  if (drift_up || drift_down) {
+    level_ = Level::kVerified;
+  } else if ((probable_drift_up || probable_drift_down) &&
+             level_ == Level::kNone) {
+    level_ = Level::kProbable;
+  }
+
+  // Shift delay history one step.
+  delay_history_[2] = delay_history_[1];
+  delay_history_[1] = delay_history_[0];
+  delay_history_[0] = delay_estimate;
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/clockdrift_detector.h b/modules/audio_processing/aec3/clockdrift_detector.h
new file mode 100644
index 0000000..22528c9
--- /dev/null
+++ b/modules/audio_processing/aec3/clockdrift_detector.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_CLOCKDRIFT_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_CLOCKDRIFT_DETECTOR_H_
+
+#include <array>
+
+namespace webrtc {
+
+class ApmDataDumper;
+struct DownsampledRenderBuffer;
+struct EchoCanceller3Config;
+
+// Detects clockdrift by analyzing the estimated delay.
+class ClockdriftDetector {
+ public:
+  enum class Level { kNone, kProbable, kVerified, kNumCategories };
+  ClockdriftDetector();
+  ~ClockdriftDetector();
+  void Update(int delay_estimate);
+  Level ClockdriftLevel() const { return level_; }
+
+ private:
+  std::array<int, 3> delay_history_;
+  Level level_;
+  size_t stability_counter_;
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_CLOCKDRIFT_DETECTOR_H_
diff --git a/modules/audio_processing/aec3/clockdrift_detector_unittest.cc b/modules/audio_processing/aec3/clockdrift_detector_unittest.cc
new file mode 100644
index 0000000..0f98b01
--- /dev/null
+++ b/modules/audio_processing/aec3/clockdrift_detector_unittest.cc
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/clockdrift_detector.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+TEST(ClockdriftDetector, ClockdriftDetector) {
+  ClockdriftDetector c;
+  // No clockdrift at start.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+
+  // Monotonically increasing delay.
+  for (int i = 0; i < 100; i++)
+    c.Update(1000);
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+  for (int i = 0; i < 100; i++)
+    c.Update(1001);
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+  for (int i = 0; i < 100; i++)
+    c.Update(1002);
+  // Probable clockdrift.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kProbable);
+  for (int i = 0; i < 100; i++)
+    c.Update(1003);
+  // Verified clockdrift.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kVerified);
+
+  // Stable delay.
+  for (int i = 0; i < 10000; i++)
+    c.Update(1003);
+  // No clockdrift.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+
+  // Decreasing delay.
+  for (int i = 0; i < 100; i++)
+    c.Update(1001);
+  for (int i = 0; i < 100; i++)
+    c.Update(999);
+  // Probable clockdrift.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kProbable);
+  for (int i = 0; i < 100; i++)
+    c.Update(1000);
+  for (int i = 0; i < 100; i++)
+    c.Update(998);
+  // Verified clockdrift.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kVerified);
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/echo_canceller3.cc b/modules/audio_processing/aec3/echo_canceller3.cc
index 5debcda..f05edb1 100644
--- a/modules/audio_processing/aec3/echo_canceller3.cc
+++ b/modules/audio_processing/aec3/echo_canceller3.cc
@@ -68,14 +68,12 @@
   return field_trial::IsEnabled("WebRTC-Aec3UseLegacyNormalSuppressorTuning");
 }
 
-bool DeactivateStationarityProperties() {
-  return field_trial::IsEnabled(
-      "WebRTC-Aec3UseStationarityPropertiesKillSwitch");
+bool ActivateStationarityProperties() {
+  return field_trial::IsEnabled("WebRTC-Aec3UseStationarityProperties");
 }
 
-bool DeactivateStationarityPropertiesAtInit() {
-  return field_trial::IsEnabled(
-      "WebRTC-Aec3UseStationarityPropertiesAtInitKillSwitch");
+bool ActivateStationarityPropertiesAtInit() {
+  return field_trial::IsEnabled("WebRTC-Aec3UseStationarityPropertiesAtInit");
 }
 
 bool EnableNewRenderBuffering() {
@@ -154,15 +152,12 @@
     adjusted_cfg.suppressor.dominant_nearend_detection.hold_duration = 25;
   }
 
-  // TODO(peah): Clean this up once upstream dependencies that forces this to
-  // zero are resolved.
-  adjusted_cfg.echo_audibility.use_stationary_properties = true;
-  if (DeactivateStationarityProperties()) {
-    adjusted_cfg.echo_audibility.use_stationary_properties = false;
+  if (ActivateStationarityProperties()) {
+    adjusted_cfg.echo_audibility.use_stationary_properties = true;
   }
 
-  if (DeactivateStationarityPropertiesAtInit()) {
-    adjusted_cfg.echo_audibility.use_stationarity_properties_at_init = false;
+  if (ActivateStationarityPropertiesAtInit()) {
+    adjusted_cfg.echo_audibility.use_stationarity_properties_at_init = true;
   }
 
   if (!UseEarlyDelayDetection()) {
@@ -451,6 +446,10 @@
   data_dumper_->DumpRaw("aec3_call_order",
                         static_cast<int>(EchoCanceller3ApiCall::kCapture));
 
+  // Report capture call in the metrics and periodically update API call
+  // metrics.
+  api_call_metrics_.ReportCaptureCall();
+
   // Optionally delay the capture signal.
   if (config_.delay.fixed_capture_delay_samples > 0) {
     block_delay_buffer_.DelaySignal(capture);
@@ -505,6 +504,9 @@
   bool frame_to_buffer =
       render_transfer_queue_.Remove(&render_queue_output_frame_);
   while (frame_to_buffer) {
+    // Report render call in the metrics.
+    api_call_metrics_.ReportRenderCall();
+
     BufferRenderFrameContent(&render_queue_output_frame_, 0, &render_blocker_,
                              block_processor_.get(), &block_, &sub_frame_view_);
 
diff --git a/modules/audio_processing/aec3/echo_canceller3.h b/modules/audio_processing/aec3/echo_canceller3.h
index 0d07702..671d271 100644
--- a/modules/audio_processing/aec3/echo_canceller3.h
+++ b/modules/audio_processing/aec3/echo_canceller3.h
@@ -18,6 +18,7 @@
 #include "api/array_view.h"
 #include "api/audio/echo_canceller3_config.h"
 #include "api/audio/echo_control.h"
+#include "modules/audio_processing/aec3/api_call_jitter_metrics.h"
 #include "modules/audio_processing/aec3/block_delay_buffer.h"
 #include "modules/audio_processing/aec3/block_framer.h"
 #include "modules/audio_processing/aec3/block_processor.h"
@@ -140,6 +141,7 @@
   std::vector<rtc::ArrayView<float>> sub_frame_view_
       RTC_GUARDED_BY(capture_race_checker_);
   BlockDelayBuffer block_delay_buffer_ RTC_GUARDED_BY(capture_race_checker_);
+  ApiCallJitterMetrics api_call_metrics_ RTC_GUARDED_BY(capture_race_checker_);
 
   RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(EchoCanceller3);
 };
diff --git a/modules/audio_processing/aec3/echo_path_delay_estimator.cc b/modules/audio_processing/aec3/echo_path_delay_estimator.cc
index 5c838ae..6069ed6 100644
--- a/modules/audio_processing/aec3/echo_path_delay_estimator.cc
+++ b/modules/audio_processing/aec3/echo_path_delay_estimator.cc
@@ -73,6 +73,12 @@
       matched_filter_lag_aggregator_.Aggregate(
           matched_filter_.GetLagEstimates());
 
+  // Run clockdrift detection.
+  if (aggregated_matched_filter_lag &&
+      (*aggregated_matched_filter_lag).quality ==
+          DelayEstimate::Quality::kRefined)
+    clockdrift_detector_.Update((*aggregated_matched_filter_lag).delay);
+
   // TODO(peah): Move this logging outside of this class once EchoCanceller3
   // development is done.
   data_dumper_->DumpRaw(
@@ -112,5 +118,4 @@
   old_aggregated_lag_ = absl::nullopt;
   consistent_estimate_counter_ = 0;
 }
-
 }  // namespace webrtc
diff --git a/modules/audio_processing/aec3/echo_path_delay_estimator.h b/modules/audio_processing/aec3/echo_path_delay_estimator.h
index 060c875..1f14735 100644
--- a/modules/audio_processing/aec3/echo_path_delay_estimator.h
+++ b/modules/audio_processing/aec3/echo_path_delay_estimator.h
@@ -15,6 +15,7 @@
 
 #include "absl/types/optional.h"
 #include "api/array_view.h"
+#include "modules/audio_processing/aec3/clockdrift_detector.h"
 #include "modules/audio_processing/aec3/decimator.h"
 #include "modules/audio_processing/aec3/delay_estimate.h"
 #include "modules/audio_processing/aec3/matched_filter.h"
@@ -49,6 +50,11 @@
                                         down_sampling_factor_);
   }
 
+  // Returns the level of detected clockdrift.
+  ClockdriftDetector::Level Clockdrift() const {
+    return clockdrift_detector_.ClockdriftLevel();
+  }
+
  private:
   ApmDataDumper* const data_dumper_;
   const size_t down_sampling_factor_;
@@ -58,6 +64,7 @@
   MatchedFilterLagAggregator matched_filter_lag_aggregator_;
   absl::optional<DelayEstimate> old_aggregated_lag_;
   size_t consistent_estimate_counter_ = 0;
+  ClockdriftDetector clockdrift_detector_;
 
   // Internal reset method with more granularity.
   void Reset(bool reset_lag_aggregator, bool reset_delay_confidence);
diff --git a/modules/audio_processing/aec3/erle_estimator.cc b/modules/audio_processing/aec3/erle_estimator.cc
index 539a59b..656a9c7 100644
--- a/modules/audio_processing/aec3/erle_estimator.cc
+++ b/modules/audio_processing/aec3/erle_estimator.cc
@@ -10,20 +10,18 @@
 
 #include "modules/audio_processing/aec3/erle_estimator.h"
 
-#include "api/array_view.h"
 #include "modules/audio_processing/aec3/aec3_common.h"
-#include "modules/audio_processing/logging/apm_data_dumper.h"
 #include "rtc_base/checks.h"
 
 namespace webrtc {
 
 ErleEstimator::ErleEstimator(size_t startup_phase_length_blocks_,
-                             float min_erle,
-                             float max_erle_lf,
-                             float max_erle_hf)
+                             const EchoCanceller3Config& config)
     : startup_phase_length_blocks__(startup_phase_length_blocks_),
-      fullband_erle_estimator_(min_erle, max_erle_lf),
-      subband_erle_estimator_(min_erle, max_erle_lf, max_erle_hf) {
+      use_signal_dependent_erle_(config.erle.num_sections > 1),
+      fullband_erle_estimator_(config.erle.min, config.erle.max_l),
+      subband_erle_estimator_(config),
+      signal_dependent_erle_estimator_(config) {
   Reset(true);
 }
 
@@ -32,16 +30,21 @@
 void ErleEstimator::Reset(bool delay_change) {
   fullband_erle_estimator_.Reset();
   subband_erle_estimator_.Reset();
+  signal_dependent_erle_estimator_.Reset();
   if (delay_change) {
     blocks_since_reset_ = 0;
   }
 }
 
-void ErleEstimator::Update(rtc::ArrayView<const float> reverb_render_spectrum,
-                           rtc::ArrayView<const float> capture_spectrum,
-                           rtc::ArrayView<const float> subtractor_spectrum,
-                           bool converged_filter,
-                           bool onset_detection) {
+void ErleEstimator::Update(
+    const RenderBuffer& render_buffer,
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+        filter_frequency_response,
+    rtc::ArrayView<const float> reverb_render_spectrum,
+    rtc::ArrayView<const float> capture_spectrum,
+    rtc::ArrayView<const float> subtractor_spectrum,
+    bool converged_filter,
+    bool onset_detection) {
   RTC_DCHECK_EQ(kFftLengthBy2Plus1, reverb_render_spectrum.size());
   RTC_DCHECK_EQ(kFftLengthBy2Plus1, capture_spectrum.size());
   RTC_DCHECK_EQ(kFftLengthBy2Plus1, subtractor_spectrum.size());
@@ -55,6 +58,13 @@
 
   subband_erle_estimator_.Update(X2_reverb, Y2, E2, converged_filter,
                                  onset_detection);
+
+  if (use_signal_dependent_erle_) {
+    signal_dependent_erle_estimator_.Update(
+        render_buffer, filter_frequency_response, X2_reverb, Y2, E2,
+        subband_erle_estimator_.Erle(), converged_filter);
+  }
+
   fullband_erle_estimator_.Update(X2_reverb, Y2, E2, converged_filter);
 }
 
@@ -62,6 +72,7 @@
     const std::unique_ptr<ApmDataDumper>& data_dumper) const {
   fullband_erle_estimator_.Dump(data_dumper);
   subband_erle_estimator_.Dump(data_dumper);
+  signal_dependent_erle_estimator_.Dump(data_dumper);
 }
 
 }  // namespace webrtc
diff --git a/modules/audio_processing/aec3/erle_estimator.h b/modules/audio_processing/aec3/erle_estimator.h
index 2d2c3ae..8036c21 100644
--- a/modules/audio_processing/aec3/erle_estimator.h
+++ b/modules/audio_processing/aec3/erle_estimator.h
@@ -17,8 +17,11 @@
 
 #include "absl/types/optional.h"
 #include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
 #include "modules/audio_processing/aec3/aec3_common.h"
 #include "modules/audio_processing/aec3/fullband_erle_estimator.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/signal_dependent_erle_estimator.h"
 #include "modules/audio_processing/aec3/subband_erle_estimator.h"
 #include "modules/audio_processing/logging/apm_data_dumper.h"
 
@@ -29,16 +32,17 @@
 class ErleEstimator {
  public:
   ErleEstimator(size_t startup_phase_length_blocks_,
-                float min_erle,
-                float max_erle_lf,
-                float max_erle_hf);
+                const EchoCanceller3Config& config);
   ~ErleEstimator();
 
   // Resets the fullband ERLE estimator and the subbands ERLE estimators.
   void Reset(bool delay_change);
 
   // Updates the ERLE estimates.
-  void Update(rtc::ArrayView<const float> reverb_render_spectrum,
+  void Update(const RenderBuffer& render_buffer,
+              const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+                  filter_frequency_response,
+              rtc::ArrayView<const float> reverb_render_spectrum,
               rtc::ArrayView<const float> capture_spectrum,
               rtc::ArrayView<const float> subtractor_spectrum,
               bool converged_filter,
@@ -46,11 +50,12 @@
 
   // Returns the most recent subband ERLE estimates.
   const std::array<float, kFftLengthBy2Plus1>& Erle() const {
-    return subband_erle_estimator_.Erle();
+    return use_signal_dependent_erle_ ? signal_dependent_erle_estimator_.Erle()
+                                      : subband_erle_estimator_.Erle();
   }
   // Returns the subband ERLE that are estimated during onsets. Used
   // for logging/testing.
-  const std::array<float, kFftLengthBy2Plus1>& ErleOnsets() const {
+  rtc::ArrayView<const float> ErleOnsets() const {
     return subband_erle_estimator_.ErleOnsets();
   }
 
@@ -71,8 +76,10 @@
 
  private:
   const size_t startup_phase_length_blocks__;
+  const bool use_signal_dependent_erle_;
   FullBandErleEstimator fullband_erle_estimator_;
   SubbandErleEstimator subband_erle_estimator_;
+  SignalDependentErleEstimator signal_dependent_erle_estimator_;
   size_t blocks_since_reset_ = 0;
 };
 
diff --git a/modules/audio_processing/aec3/erle_estimator_unittest.cc b/modules/audio_processing/aec3/erle_estimator_unittest.cc
index 2cb050a..59a7471 100644
--- a/modules/audio_processing/aec3/erle_estimator_unittest.cc
+++ b/modules/audio_processing/aec3/erle_estimator_unittest.cc
@@ -12,6 +12,9 @@
 
 #include "api/array_view.h"
 #include "modules/audio_processing/aec3/erle_estimator.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/aec3/vector_buffer.h"
+#include "rtc_base/random.h"
 #include "test/gtest.h"
 
 namespace webrtc {
@@ -19,11 +22,9 @@
 namespace {
 
 constexpr int kLowFrequencyLimit = kFftLengthBy2 / 2;
-constexpr float kMaxErleLf = 8.f;
-constexpr float kMaxErleHf = 1.5f;
-constexpr float kMinErle = 1.0f;
 constexpr float kTrueErle = 10.f;
 constexpr float kTrueErleOnsets = 1.0f;
+constexpr float kEchoPathGain = 3.f;
 
 void VerifyErleBands(rtc::ArrayView<const float> erle,
                      float reference_lf,
@@ -44,80 +45,157 @@
   EXPECT_NEAR(reference_lf, erle_time_domain, 0.5);
 }
 
-void FormFarendFrame(std::array<float, kFftLengthBy2Plus1>* X2,
+void FormFarendTimeFrame(rtc::ArrayView<float> x) {
+  const std::array<float, kBlockSize> frame = {
+      7459.88, 17209.6, 17383,   20768.9, 16816.7, 18386.3, 4492.83, 9675.85,
+      6665.52, 14808.6, 9342.3,  7483.28, 19261.7, 4145.98, 1622.18, 13475.2,
+      7166.32, 6856.61, 21937,   7263.14, 9569.07, 14919,   8413.32, 7551.89,
+      7848.65, 6011.27, 13080.6, 15865.2, 12656,   17459.6, 4263.93, 4503.03,
+      9311.79, 21095.8, 12657.9, 13906.6, 19267.2, 11338.1, 16828.9, 11501.6,
+      11405,   15031.4, 14541.6, 19765.5, 18346.3, 19350.2, 3157.47, 18095.8,
+      1743.68, 21328.2, 19727.5, 7295.16, 10332.4, 11055.5, 20107.4, 14708.4,
+      12416.2, 16434,   2454.69, 9840.8,  6867.23, 1615.75, 6059.9,  8394.19};
+  RTC_DCHECK_GE(x.size(), frame.size());
+  std::copy(frame.begin(), frame.end(), x.begin());
+}
+
+void FormFarendFrame(const RenderBuffer& render_buffer,
+                     std::array<float, kFftLengthBy2Plus1>* X2,
                      std::array<float, kFftLengthBy2Plus1>* E2,
                      std::array<float, kFftLengthBy2Plus1>* Y2,
                      float erle) {
-  X2->fill(500 * 1000.f * 1000.f);
-  E2->fill(1000.f * 1000.f);
-  Y2->fill(erle * (*E2)[0]);
-}
+  const auto& spectrum_buffer = render_buffer.GetSpectrumBuffer();
+  const auto& X2_from_buffer = spectrum_buffer.buffer[spectrum_buffer.write];
+  std::copy(X2_from_buffer.begin(), X2_from_buffer.end(), X2->begin());
+  std::transform(X2->begin(), X2->end(), Y2->begin(),
+                 [](float a) { return a * kEchoPathGain * kEchoPathGain; });
+  std::transform(Y2->begin(), Y2->end(), E2->begin(),
+                 [erle](float a) { return a / erle; });
 
-void FormNearendFrame(std::array<float, kFftLengthBy2Plus1>* X2,
+}
+
+void FormNearendFrame(rtc::ArrayView<float> x,
+                      std::array<float, kFftLengthBy2Plus1>* X2,
                       std::array<float, kFftLengthBy2Plus1>* E2,
                       std::array<float, kFftLengthBy2Plus1>* Y2) {
+  x[0] = 0.f;
   X2->fill(0.f);
   Y2->fill(500.f * 1000.f * 1000.f);
   E2->fill((*Y2)[0]);
 }
 
+void GetFilterFreq(std::vector<std::array<float, kFftLengthBy2Plus1>>&
+                       filter_frequency_response,
+                   size_t delay_headroom_blocks) {
+  RTC_DCHECK_GE(filter_frequency_response.size(), delay_headroom_blocks);
+  for (auto& block_freq_resp : filter_frequency_response) {
+    block_freq_resp.fill(0.f);
+  }
+
+  for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+    filter_frequency_response[delay_headroom_blocks][k] = kEchoPathGain;
+  }
+}
+
 }  // namespace
 
 TEST(ErleEstimator, VerifyErleIncreaseAndHold) {
   std::array<float, kFftLengthBy2Plus1> X2;
   std::array<float, kFftLengthBy2Plus1> E2;
   std::array<float, kFftLengthBy2Plus1> Y2;
+  EchoCanceller3Config config;
+  std::vector<std::vector<float>> x(3, std::vector<float>(kBlockSize, 0.f));
+  std::vector<std::array<float, kFftLengthBy2Plus1>> filter_frequency_response(
+      config.filter.main.length_blocks);
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create2(config, 3));
 
-  ErleEstimator estimator(0, kMinErle, kMaxErleLf, kMaxErleHf);
+  GetFilterFreq(filter_frequency_response, config.delay.delay_headroom_blocks);
 
+  ErleEstimator estimator(0, config);
+
+  FormFarendTimeFrame(x[0]);
+  render_delay_buffer->Insert(x);
+  render_delay_buffer->PrepareCaptureProcessing();
   // Verifies that the ERLE estimate is properly increased to higher values.
-  FormFarendFrame(&X2, &E2, &Y2, kTrueErle);
-
+  FormFarendFrame(*render_delay_buffer->GetRenderBuffer(), &X2, &E2, &Y2,
+                  kTrueErle);
   for (size_t k = 0; k < 200; ++k) {
-    estimator.Update(X2, Y2, E2, true, true);
+    render_delay_buffer->Insert(x);
+    render_delay_buffer->PrepareCaptureProcessing();
+    estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+                     filter_frequency_response, X2, Y2, E2, true, true);
   }
   VerifyErle(estimator.Erle(), std::pow(2.f, estimator.FullbandErleLog2()),
-             kMaxErleLf, kMaxErleHf);
+             config.erle.max_l, config.erle.max_h);
 
-  FormNearendFrame(&X2, &E2, &Y2);
+  FormNearendFrame(x[0], &X2, &E2, &Y2);
   // Verifies that the ERLE is not immediately decreased during nearend
   // activity.
   for (size_t k = 0; k < 50; ++k) {
-    estimator.Update(X2, Y2, E2, true, true);
+    render_delay_buffer->Insert(x);
+    render_delay_buffer->PrepareCaptureProcessing();
+    estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+                     filter_frequency_response, X2, Y2, E2, true, true);
   }
   VerifyErle(estimator.Erle(), std::pow(2.f, estimator.FullbandErleLog2()),
-             kMaxErleLf, kMaxErleHf);
+             config.erle.max_l, config.erle.max_h);
 }
 
 TEST(ErleEstimator, VerifyErleTrackingOnOnsets) {
   std::array<float, kFftLengthBy2Plus1> X2;
   std::array<float, kFftLengthBy2Plus1> E2;
   std::array<float, kFftLengthBy2Plus1> Y2;
+  EchoCanceller3Config config;
+  std::vector<std::vector<float>> x(3, std::vector<float>(kBlockSize, 0.f));
+  std::vector<std::array<float, kFftLengthBy2Plus1>> filter_frequency_response(
+      config.filter.main.length_blocks);
 
-  ErleEstimator estimator(0, kMinErle, kMaxErleLf, kMaxErleHf);
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create2(config, 3));
+
+  GetFilterFreq(filter_frequency_response, config.delay.delay_headroom_blocks);
+
+  ErleEstimator estimator(0, config);
+
+  FormFarendTimeFrame(x[0]);
+  render_delay_buffer->Insert(x);
+  render_delay_buffer->PrepareCaptureProcessing();
 
   for (size_t burst = 0; burst < 20; ++burst) {
-    FormFarendFrame(&X2, &E2, &Y2, kTrueErleOnsets);
+    FormFarendFrame(*render_delay_buffer->GetRenderBuffer(), &X2, &E2, &Y2,
+                    kTrueErleOnsets);
     for (size_t k = 0; k < 10; ++k) {
-      estimator.Update(X2, Y2, E2, true, true);
+      render_delay_buffer->Insert(x);
+      render_delay_buffer->PrepareCaptureProcessing();
+      estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+                       filter_frequency_response, X2, Y2, E2, true, true);
     }
-    FormFarendFrame(&X2, &E2, &Y2, kTrueErle);
+    FormFarendFrame(*render_delay_buffer->GetRenderBuffer(), &X2, &E2, &Y2,
+                    kTrueErle);
     for (size_t k = 0; k < 200; ++k) {
-      estimator.Update(X2, Y2, E2, true, true);
+      render_delay_buffer->Insert(x);
+      render_delay_buffer->PrepareCaptureProcessing();
+      estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+                       filter_frequency_response, X2, Y2, E2, true, true);
     }
-    FormNearendFrame(&X2, &E2, &Y2);
+    FormNearendFrame(x[0], &X2, &E2, &Y2);
     for (size_t k = 0; k < 300; ++k) {
-      estimator.Update(X2, Y2, E2, true, true);
+      render_delay_buffer->Insert(x);
+      render_delay_buffer->PrepareCaptureProcessing();
+      estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+                       filter_frequency_response, X2, Y2, E2, true, true);
     }
   }
-  VerifyErleBands(estimator.ErleOnsets(), kMinErle, kMinErle);
-  FormNearendFrame(&X2, &E2, &Y2);
+  VerifyErleBands(estimator.ErleOnsets(), config.erle.min, config.erle.min);
+  FormNearendFrame(x[0], &X2, &E2, &Y2);
   for (size_t k = 0; k < 1000; k++) {
-    estimator.Update(X2, Y2, E2, true, true);
+    estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+                     filter_frequency_response, X2, Y2, E2, true, true);
   }
   // Verifies that during ne activity, Erle converges to the Erle for onsets.
   VerifyErle(estimator.Erle(), std::pow(2.f, estimator.FullbandErleLog2()),
-             kMinErle, kMinErle);
+             config.erle.min, config.erle.min);
 }
 
 }  // namespace webrtc
diff --git a/modules/audio_processing/aec3/filter_analyzer.cc b/modules/audio_processing/aec3/filter_analyzer.cc
index 5b890d7..3e69be6 100644
--- a/modules/audio_processing/aec3/filter_analyzer.cc
+++ b/modules/audio_processing/aec3/filter_analyzer.cc
@@ -25,18 +25,22 @@
 namespace webrtc {
 namespace {
 
-size_t FindPeakIndex(rtc::ArrayView<const float> filter_time_domain) {
-  size_t peak_index = 0;
-  float max_h2 = filter_time_domain[0] * filter_time_domain[0];
-  for (size_t k = 1; k < filter_time_domain.size(); ++k) {
+size_t FindPeakIndex(rtc::ArrayView<const float> filter_time_domain,
+                     size_t peak_index_in,
+                     size_t start_sample,
+                     size_t end_sample) {
+  size_t peak_index_out = peak_index_in;
+  float max_h2 =
+      filter_time_domain[peak_index_out] * filter_time_domain[peak_index_out];
+  for (size_t k = start_sample; k <= end_sample; ++k) {
     float tmp = filter_time_domain[k] * filter_time_domain[k];
     if (tmp > max_h2) {
-      peak_index = k;
+      peak_index_out = k;
       max_h2 = tmp;
     }
   }
 
-  return peak_index;
+  return peak_index_out;
 }
 
 bool EnableFilterPreprocessing() {
@@ -44,6 +48,11 @@
       "WebRTC-Aec3FilterAnalyzerPreprocessorKillSwitch");
 }
 
+bool EnableIncrementalAnalysis() {
+  return !field_trial::IsEnabled(
+      "WebRTC-Aec3FilterAnalyzerIncrementalAnalysisKillSwitch");
+}
+
 }  // namespace
 
 int FilterAnalyzer::instance_count_ = 0;
@@ -54,46 +63,37 @@
       use_preprocessed_filter_(EnableFilterPreprocessing()),
       bounded_erl_(config.ep_strength.bounded_erl),
       default_gain_(config.ep_strength.lf),
-      active_render_threshold_(config.render_levels.active_render_limit *
-                               config.render_levels.active_render_limit *
-                               kFftLengthBy2),
+      use_incremental_analysis_(EnableIncrementalAnalysis()),
       h_highpass_(GetTimeDomainLength(config.filter.main.length_blocks), 0.f),
-      filter_length_blocks_(config.filter.main_initial.length_blocks) {
+      filter_length_blocks_(config.filter.main_initial.length_blocks),
+      consistent_filter_detector_(config) {
   Reset();
 }
 
-void FilterAnalyzer::PreProcessFilter(
-    rtc::ArrayView<const float> filter_time_domain) {
-  RTC_DCHECK_GE(h_highpass_.capacity(), filter_time_domain.size());
-  h_highpass_.resize(filter_time_domain.size());
-  // Minimum phase high-pass filter with cutoff frequency at about 600 Hz.
-  constexpr std::array<float, 3> h = {{0.7929742f, -0.36072128f, -0.47047766f}};
-
-  std::fill(h_highpass_.begin(), h_highpass_.end(), 0.f);
-  for (size_t k = h.size() - 1; k < filter_time_domain.size(); ++k) {
-    for (size_t j = 0; j < h.size(); ++j) {
-      h_highpass_[k] += filter_time_domain[k - j] * h[j];
-    }
-  }
-}
-
 FilterAnalyzer::~FilterAnalyzer() = default;
 
 void FilterAnalyzer::Reset() {
   delay_blocks_ = 0;
-  consistent_estimate_ = false;
   blocks_since_reset_ = 0;
-  consistent_estimate_ = false;
-  consistent_estimate_counter_ = 0;
-  consistent_delay_reference_ = -10;
   gain_ = default_gain_;
+  peak_index_ = 0;
+  ResetRegion();
+  consistent_filter_detector_.Reset();
 }
 
-void FilterAnalyzer::Update(
+void FilterAnalyzer::Update(rtc::ArrayView<const float> filter_time_domain,
+                            const RenderBuffer& render_buffer) {
+  SetRegionToAnalyze(filter_time_domain);
+  AnalyzeRegion(filter_time_domain, render_buffer);
+}
+
+void FilterAnalyzer::AnalyzeRegion(
     rtc::ArrayView<const float> filter_time_domain,
-    const std::vector<std::array<float, kFftLengthBy2Plus1>>&
-        filter_freq_response,
     const RenderBuffer& render_buffer) {
+  RTC_DCHECK_LT(region_.start_sample_, filter_time_domain.size());
+  RTC_DCHECK_LT(peak_index_, filter_time_domain.size());
+  RTC_DCHECK_LT(region_.end_sample_, filter_time_domain.size());
+
   // Preprocess the filter to avoid issues with low-frequency components in the
   // filter.
   PreProcessFilter(filter_time_domain);
@@ -103,51 +103,15 @@
       use_preprocessed_filter_ ? h_highpass_ : filter_time_domain;
   RTC_DCHECK_EQ(filter_to_analyze.size(), filter_time_domain.size());
 
-  size_t peak_index = FindPeakIndex(filter_to_analyze);
-  delay_blocks_ = peak_index >> kBlockSizeLog2;
-  UpdateFilterGain(filter_to_analyze, peak_index);
-
-  float filter_floor = 0;
-  float filter_secondary_peak = 0;
-  size_t limit1 = peak_index < 64 ? 0 : peak_index - 64;
-  size_t limit2 =
-      peak_index > filter_to_analyze.size() - 129 ? 0 : peak_index + 128;
-
-  for (size_t k = 0; k < limit1; ++k) {
-    float abs_h = fabsf(filter_to_analyze[k]);
-    filter_floor += abs_h;
-    filter_secondary_peak = std::max(filter_secondary_peak, abs_h);
-  }
-  for (size_t k = limit2; k < filter_to_analyze.size(); ++k) {
-    float abs_h = fabsf(filter_to_analyze[k]);
-    filter_floor += abs_h;
-    filter_secondary_peak = std::max(filter_secondary_peak, abs_h);
-  }
-
-  filter_floor /= (limit1 + filter_to_analyze.size() - limit2);
-
-  float abs_peak = fabsf(filter_to_analyze[peak_index]);
-  bool significant_peak_index =
-      abs_peak > 10.f * filter_floor && abs_peak > 2.f * filter_secondary_peak;
-
-  if (consistent_delay_reference_ != delay_blocks_ || !significant_peak_index) {
-    consistent_estimate_counter_ = 0;
-    consistent_delay_reference_ = delay_blocks_;
-  } else {
-    const auto& x = render_buffer.Block(-delay_blocks_)[0];
-    const float x_energy =
-        std::inner_product(x.begin(), x.end(), x.begin(), 0.f);
-    const bool active_render_block = x_energy > active_render_threshold_;
-
-    if (active_render_block) {
-      ++consistent_estimate_counter_;
-    }
-  }
-
-  consistent_estimate_ =
-      consistent_estimate_counter_ > 1.5f * kNumBlocksPerSecond;
-
+  peak_index_ = FindPeakIndex(filter_to_analyze, peak_index_,
+                              region_.start_sample_, region_.end_sample_);
+  delay_blocks_ = peak_index_ >> kBlockSizeLog2;
+  UpdateFilterGain(filter_to_analyze, peak_index_);
   filter_length_blocks_ = filter_time_domain.size() * (1.f / kBlockSize);
+
+  consistent_estimate_ = consistent_filter_detector_.Detect(
+      filter_to_analyze, region_, render_buffer.Block(-delay_blocks_)[0],
+      peak_index_, delay_blocks_);
 }
 
 void FilterAnalyzer::UpdateFilterGain(
@@ -169,4 +133,114 @@
   }
 }
 
+void FilterAnalyzer::PreProcessFilter(
+    rtc::ArrayView<const float> filter_time_domain) {
+  RTC_DCHECK_GE(h_highpass_.capacity(), filter_time_domain.size());
+  h_highpass_.resize(filter_time_domain.size());
+  // Minimum phase high-pass filter with cutoff frequency at about 600 Hz.
+  constexpr std::array<float, 3> h = {{0.7929742f, -0.36072128f, -0.47047766f}};
+
+  std::fill(h_highpass_.begin() + region_.start_sample_,
+            h_highpass_.begin() + region_.end_sample_ + 1, 0.f);
+  for (size_t k = std::max(h.size() - 1, region_.start_sample_);
+       k <= region_.end_sample_; ++k) {
+    for (size_t j = 0; j < h.size(); ++j) {
+      h_highpass_[k] += filter_time_domain[k - j] * h[j];
+    }
+  }
+}
+
+void FilterAnalyzer::ResetRegion() {
+  region_.start_sample_ = 0;
+  region_.end_sample_ = 0;
+}
+
+void FilterAnalyzer::SetRegionToAnalyze(
+    rtc::ArrayView<const float> filter_time_domain) {
+  constexpr size_t kNumberBlocksToUpdate = 1;
+  auto& r = region_;
+  if (use_incremental_analysis_) {
+    r.start_sample_ =
+        r.end_sample_ == filter_time_domain.size() - 1 ? 0 : r.end_sample_ + 1;
+    r.end_sample_ =
+        std::min(r.start_sample_ + kNumberBlocksToUpdate * kBlockSize - 1,
+                 filter_time_domain.size() - 1);
+
+  } else {
+    r.start_sample_ = 0;
+    r.end_sample_ = filter_time_domain.size() - 1;
+  }
+}
+
+FilterAnalyzer::ConsistentFilterDetector::ConsistentFilterDetector(
+    const EchoCanceller3Config& config)
+    : active_render_threshold_(config.render_levels.active_render_limit *
+                               config.render_levels.active_render_limit *
+                               kFftLengthBy2) {}
+
+void FilterAnalyzer::ConsistentFilterDetector::Reset() {
+  significant_peak_ = false;
+  filter_floor_accum_ = 0.f;
+  filter_secondary_peak_ = 0.f;
+  filter_floor_low_limit_ = 0;
+  filter_floor_high_limit_ = 0;
+  consistent_estimate_counter_ = 0;
+  consistent_delay_reference_ = -10;
+}
+
+bool FilterAnalyzer::ConsistentFilterDetector::Detect(
+    rtc::ArrayView<const float> filter_to_analyze,
+    const FilterRegion& region,
+    rtc::ArrayView<const float> x_block,
+    size_t peak_index,
+    int delay_blocks) {
+  if (region.start_sample_ == 0) {
+    filter_floor_accum_ = 0.f;
+    filter_secondary_peak_ = 0.f;
+    filter_floor_low_limit_ = peak_index < 64 ? 0 : peak_index - 64;
+    filter_floor_high_limit_ =
+        peak_index > filter_to_analyze.size() - 129 ? 0 : peak_index + 128;
+  }
+
+  for (size_t k = region.start_sample_;
+       k < std::min(region.end_sample_ + 1, filter_floor_low_limit_); ++k) {
+    float abs_h = fabsf(filter_to_analyze[k]);
+    filter_floor_accum_ += abs_h;
+    filter_secondary_peak_ = std::max(filter_secondary_peak_, abs_h);
+  }
+
+  for (size_t k = std::max(filter_floor_high_limit_, region.start_sample_);
+       k <= region.end_sample_; ++k) {
+    float abs_h = fabsf(filter_to_analyze[k]);
+    filter_floor_accum_ += abs_h;
+    filter_secondary_peak_ = std::max(filter_secondary_peak_, abs_h);
+  }
+
+  if (region.end_sample_ == filter_to_analyze.size() - 1) {
+    float filter_floor = filter_floor_accum_ /
+                         (filter_floor_low_limit_ + filter_to_analyze.size() -
+                          filter_floor_high_limit_);
+
+    float abs_peak = fabsf(filter_to_analyze[peak_index]);
+    significant_peak_ = abs_peak > 10.f * filter_floor &&
+                        abs_peak > 2.f * filter_secondary_peak_;
+  }
+
+  if (significant_peak_) {
+    const float x_energy = std::inner_product(x_block.begin(), x_block.end(),
+                                              x_block.begin(), 0.f);
+    const bool active_render_block = x_energy > active_render_threshold_;
+
+    if (consistent_delay_reference_ == delay_blocks) {
+      if (active_render_block) {
+        ++consistent_estimate_counter_;
+      }
+    } else {
+      consistent_estimate_counter_ = 0;
+      consistent_delay_reference_ = delay_blocks;
+    }
+  }
+  return consistent_estimate_counter_ > 1.5f * kNumBlocksPerSecond;
+}
+
 }  // namespace webrtc
diff --git a/modules/audio_processing/aec3/filter_analyzer.h b/modules/audio_processing/aec3/filter_analyzer.h
index 99a0e25..e0fd069 100644
--- a/modules/audio_processing/aec3/filter_analyzer.h
+++ b/modules/audio_processing/aec3/filter_analyzer.h
@@ -37,8 +37,6 @@
 
   // Updates the estimates with new input data.
   void Update(rtc::ArrayView<const float> filter_time_domain,
-              const std::vector<std::array<float, kFftLengthBy2Plus1>>&
-                  filter_freq_response,
               const RenderBuffer& render_buffer);
 
   // Returns the delay of the filter in terms of blocks.
@@ -58,24 +56,61 @@
   rtc::ArrayView<const float> GetAdjustedFilter() const { return h_highpass_; }
 
  private:
+  void AnalyzeRegion(rtc::ArrayView<const float> filter_time_domain,
+                     const RenderBuffer& render_buffer);
+
   void UpdateFilterGain(rtc::ArrayView<const float> filter_time_domain,
                         size_t max_index);
   void PreProcessFilter(rtc::ArrayView<const float> filter_time_domain);
 
+  void ResetRegion();
+
+  void SetRegionToAnalyze(rtc::ArrayView<const float> filter_time_domain);
+
+  struct FilterRegion {
+    size_t start_sample_;
+    size_t end_sample_;
+  };
+
+  // This class checks whether the shape of the impulse response has been
+  // consistent over time.
+  class ConsistentFilterDetector {
+   public:
+    explicit ConsistentFilterDetector(const EchoCanceller3Config& config);
+    void Reset();
+    bool Detect(rtc::ArrayView<const float> filter_to_analyze,
+                const FilterRegion& region,
+                rtc::ArrayView<const float> x_block,
+                size_t peak_index,
+                int delay_blocks);
+
+   private:
+    bool significant_peak_;
+    float filter_floor_accum_;
+    float filter_secondary_peak_;
+    size_t filter_floor_low_limit_;
+    size_t filter_floor_high_limit_;
+    const float active_render_threshold_;
+    size_t consistent_estimate_counter_ = 0;
+    int consistent_delay_reference_ = -10;
+  };
+
   static int instance_count_;
   std::unique_ptr<ApmDataDumper> data_dumper_;
   const bool use_preprocessed_filter_;
   const bool bounded_erl_;
   const float default_gain_;
-  const float active_render_threshold_;
+  const bool use_incremental_analysis_;
   std::vector<float> h_highpass_;
   int delay_blocks_ = 0;
   size_t blocks_since_reset_ = 0;
   bool consistent_estimate_ = false;
-  size_t consistent_estimate_counter_ = 0;
-  int consistent_delay_reference_ = -10;
   float gain_;
+  size_t peak_index_;
   int filter_length_blocks_;
+  FilterRegion region_;
+  ConsistentFilterDetector consistent_filter_detector_;
+
   RTC_DISALLOW_COPY_AND_ASSIGN(FilterAnalyzer);
 };
 
diff --git a/modules/audio_processing/aec3/fullband_erle_estimator.cc b/modules/audio_processing/aec3/fullband_erle_estimator.cc
index dc74509..7893b97 100644
--- a/modules/audio_processing/aec3/fullband_erle_estimator.cc
+++ b/modules/audio_processing/aec3/fullband_erle_estimator.cc
@@ -26,7 +26,7 @@
 namespace {
 constexpr float kEpsilon = 1e-3f;
 constexpr float kX2BandEnergyThreshold = 44015068.0f;
-constexpr int kErleHold = 100;
+constexpr int kBlocksToHoldErle = 100;
 constexpr int kPointsToAccumulate = 6;
 }  // namespace
 
@@ -55,7 +55,7 @@
       const float Y2_sum = std::accumulate(Y2.begin(), Y2.end(), 0.0f);
       const float E2_sum = std::accumulate(E2.begin(), E2.end(), 0.0f);
       if (instantaneous_erle_.Update(Y2_sum, E2_sum)) {
-        hold_counter_time_domain_ = kErleHold;
+        hold_counter_time_domain_ = kBlocksToHoldErle;
         erle_time_domain_log2_ +=
             0.1f * ((instantaneous_erle_.GetInstErleLog2().value()) -
                     erle_time_domain_log2_);
diff --git a/modules/audio_processing/aec3/mock/mock_render_delay_controller.h b/modules/audio_processing/aec3/mock/mock_render_delay_controller.h
index 5520f76..5f652e1 100644
--- a/modules/audio_processing/aec3/mock/mock_render_delay_controller.h
+++ b/modules/audio_processing/aec3/mock/mock_render_delay_controller.h
@@ -33,6 +33,7 @@
                    size_t render_delay_buffer_delay,
                    const absl::optional<int>& echo_remover_delay,
                    rtc::ArrayView<const float> capture));
+  MOCK_CONST_METHOD0(HasClockdrift, bool());
 };
 
 }  // namespace test
diff --git a/modules/audio_processing/aec3/render_delay_controller.cc b/modules/audio_processing/aec3/render_delay_controller.cc
index 36e75d9..c4665ea 100644
--- a/modules/audio_processing/aec3/render_delay_controller.cc
+++ b/modules/audio_processing/aec3/render_delay_controller.cc
@@ -64,6 +64,7 @@
       size_t render_delay_buffer_delay,
       const absl::optional<int>& echo_remover_delay,
       rtc::ArrayView<const float> capture) override;
+  bool HasClockdrift() const override;
 
  private:
   static int instance_count_;
@@ -285,7 +286,8 @@
 
   metrics_.Update(delay_samples_ ? absl::optional<size_t>(delay_samples_->delay)
                                  : absl::nullopt,
-                  delay_ ? delay_->delay : 0, skew_shift);
+                  delay_ ? delay_->delay : 0, skew_shift,
+                  delay_estimator_.Clockdrift());
 
   data_dumper_->DumpRaw("aec3_render_delay_controller_delay",
                         delay_samples ? delay_samples->delay : 0);
@@ -301,6 +303,10 @@
   return delay_;
 }
 
+bool RenderDelayControllerImpl::HasClockdrift() const {
+  return delay_estimator_.Clockdrift() != ClockdriftDetector::Level::kNone;
+}
+
 }  // namespace
 
 RenderDelayController* RenderDelayController::Create(
diff --git a/modules/audio_processing/aec3/render_delay_controller.h b/modules/audio_processing/aec3/render_delay_controller.h
index 41ba422..b46ed89 100644
--- a/modules/audio_processing/aec3/render_delay_controller.h
+++ b/modules/audio_processing/aec3/render_delay_controller.h
@@ -44,6 +44,9 @@
       size_t render_delay_buffer_delay,
       const absl::optional<int>& echo_remover_delay,
       rtc::ArrayView<const float> capture) = 0;
+
+  // Returns true if clockdrift has been detected.
+  virtual bool HasClockdrift() const = 0;
 };
 }  // namespace webrtc
 
diff --git a/modules/audio_processing/aec3/render_delay_controller2.cc b/modules/audio_processing/aec3/render_delay_controller2.cc
index 1b7c18d..00daf8f 100644
--- a/modules/audio_processing/aec3/render_delay_controller2.cc
+++ b/modules/audio_processing/aec3/render_delay_controller2.cc
@@ -46,6 +46,7 @@
       size_t render_delay_buffer_delay,
       const absl::optional<int>& echo_remover_delay,
       rtc::ArrayView<const float> capture) override;
+  bool HasClockdrift() const override;
 
  private:
   static int instance_count_;
@@ -127,7 +128,7 @@
   delay_samples_ = absl::nullopt;
   delay_estimator_.Reset(reset_delay_confidence);
   delay_change_counter_ = 0;
-  if (reset_delay_confidence || true) {
+  if (reset_delay_confidence) {
     last_delay_estimate_quality_ = DelayEstimate::Quality::kCoarse;
   }
 }
@@ -192,7 +193,7 @@
 
   metrics_.Update(delay_samples_ ? absl::optional<size_t>(delay_samples_->delay)
                                  : absl::nullopt,
-                  delay_ ? delay_->delay : 0, 0);
+                  delay_ ? delay_->delay : 0, 0, delay_estimator_.Clockdrift());
 
   data_dumper_->DumpRaw("aec3_render_delay_controller_delay",
                         delay_samples ? delay_samples->delay : 0);
@@ -202,6 +203,10 @@
   return delay_;
 }
 
+bool RenderDelayControllerImpl2::HasClockdrift() const {
+  return delay_estimator_.Clockdrift() != ClockdriftDetector::Level::kNone;
+}
+
 }  // namespace
 
 RenderDelayController* RenderDelayController::Create2(
diff --git a/modules/audio_processing/aec3/render_delay_controller_metrics.cc b/modules/audio_processing/aec3/render_delay_controller_metrics.cc
index c51d468..582e033 100644
--- a/modules/audio_processing/aec3/render_delay_controller_metrics.cc
+++ b/modules/audio_processing/aec3/render_delay_controller_metrics.cc
@@ -46,7 +46,8 @@
 void RenderDelayControllerMetrics::Update(
     absl::optional<size_t> delay_samples,
     size_t buffer_delay_blocks,
-    absl::optional<int> skew_shift_blocks) {
+    absl::optional<int> skew_shift_blocks,
+    ClockdriftDetector::Level clockdrift) {
   ++call_counter_;
 
   if (!initial_update) {
@@ -115,6 +116,10 @@
         static_cast<int>(delay_changes),
         static_cast<int>(DelayChangesCategory::kNumCategories));
 
+    RTC_HISTOGRAM_ENUMERATION(
+        "WebRTC.Audio.EchoCanceller.Clockdrift", static_cast<int>(clockdrift),
+        static_cast<int>(ClockdriftDetector::Level::kNumCategories));
+
     metrics_reported_ = true;
     call_counter_ = 0;
     ResetMetrics();
diff --git a/modules/audio_processing/aec3/render_delay_controller_metrics.h b/modules/audio_processing/aec3/render_delay_controller_metrics.h
index 50e60bb..22cc202 100644
--- a/modules/audio_processing/aec3/render_delay_controller_metrics.h
+++ b/modules/audio_processing/aec3/render_delay_controller_metrics.h
@@ -14,6 +14,7 @@
 #include <stddef.h>
 
 #include "absl/types/optional.h"
+#include "modules/audio_processing/aec3/clockdrift_detector.h"
 #include "rtc_base/constructormagic.h"
 
 namespace webrtc {
@@ -26,7 +27,8 @@
   // Updates the metric with new data.
   void Update(absl::optional<size_t> delay_samples,
               size_t buffer_delay_blocks,
-              absl::optional<int> skew_shift_blocks);
+              absl::optional<int> skew_shift_blocks,
+              ClockdriftDetector::Level clockdrift);
 
   // Returns true if the metrics have just been reported, otherwise false.
   bool MetricsReported() { return metrics_reported_; }
diff --git a/modules/audio_processing/aec3/render_delay_controller_metrics_unittest.cc b/modules/audio_processing/aec3/render_delay_controller_metrics_unittest.cc
index e867de4..216b0e2 100644
--- a/modules/audio_processing/aec3/render_delay_controller_metrics_unittest.cc
+++ b/modules/audio_processing/aec3/render_delay_controller_metrics_unittest.cc
@@ -22,10 +22,12 @@
 
   for (int j = 0; j < 3; ++j) {
     for (int k = 0; k < kMetricsReportingIntervalBlocks - 1; ++k) {
-      metrics.Update(absl::nullopt, 0, absl::nullopt);
+      metrics.Update(absl::nullopt, 0, absl::nullopt,
+                     ClockdriftDetector::Level::kNone);
       EXPECT_FALSE(metrics.MetricsReported());
     }
-    metrics.Update(absl::nullopt, 0, absl::nullopt);
+    metrics.Update(absl::nullopt, 0, absl::nullopt,
+                   ClockdriftDetector::Level::kNone);
     EXPECT_TRUE(metrics.MetricsReported());
   }
 }
diff --git a/modules/audio_processing/aec3/signal_dependent_erle_estimator.cc b/modules/audio_processing/aec3/signal_dependent_erle_estimator.cc
new file mode 100644
index 0000000..32b36ab
--- /dev/null
+++ b/modules/audio_processing/aec3/signal_dependent_erle_estimator.cc
@@ -0,0 +1,368 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/signal_dependent_erle_estimator.h"
+
+#include <algorithm>
+#include <functional>
+#include <numeric>
+
+#include "modules/audio_processing/aec3/vector_buffer.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr std::array<size_t, SignalDependentErleEstimator::kSubbands + 1>
+    kBandBoundaries = {1, 8, 16, 24, 32, 48, kFftLengthBy2Plus1};
+
+std::array<size_t, kFftLengthBy2Plus1> FormSubbandMap() {
+  std::array<size_t, kFftLengthBy2Plus1> map_band_to_subband;
+  size_t subband = 1;
+  for (size_t k = 0; k < map_band_to_subband.size(); ++k) {
+    RTC_DCHECK_LT(subband, kBandBoundaries.size());
+    if (k >= kBandBoundaries[subband]) {
+      subband++;
+      RTC_DCHECK_LT(k, kBandBoundaries[subband]);
+    }
+    map_band_to_subband[k] = subband - 1;
+  }
+  return map_band_to_subband;
+}
+
+// Defines the size in blocks of the sections that are used for dividing the
+// linear filter. The sections are split in a non-linear manner so that lower
+// sections that typically represent the direct path have a larger resolution
+// than the higher sections which typically represent more reverberant acoustic
+// paths.
+std::vector<size_t> DefineFilterSectionSizes(size_t delay_headroom_blocks,
+                                             size_t num_blocks,
+                                             size_t num_sections) {
+  size_t filter_length_blocks = num_blocks - delay_headroom_blocks;
+  std::vector<size_t> section_sizes(num_sections);
+  size_t remaining_blocks = filter_length_blocks;
+  size_t remaining_sections = num_sections;
+  size_t estimator_size = 2;
+  size_t idx = 0;
+  while (remaining_sections > 1 &&
+         remaining_blocks > estimator_size * remaining_sections) {
+    RTC_DCHECK_LT(idx, section_sizes.size());
+    section_sizes[idx] = estimator_size;
+    remaining_blocks -= estimator_size;
+    remaining_sections--;
+    estimator_size *= 2;
+    idx++;
+  }
+
+  size_t last_groups_size = remaining_blocks / remaining_sections;
+  for (; idx < num_sections; idx++) {
+    section_sizes[idx] = last_groups_size;
+  }
+  section_sizes[num_sections - 1] +=
+      remaining_blocks - last_groups_size * remaining_sections;
+  return section_sizes;
+}
+
+// Forms the limits in blocks for each filter section. Those sections
+// are used for analyzing the echo estimates and investigating which
+// linear filter sections contribute most to the echo estimate energy.
+std::vector<size_t> SetSectionsBoundaries(size_t delay_headroom_blocks,
+                                          size_t num_blocks,
+                                          size_t num_sections) {
+  std::vector<size_t> estimator_boundaries_blocks(num_sections + 1);
+  if (estimator_boundaries_blocks.size() == 2) {
+    estimator_boundaries_blocks[0] = 0;
+    estimator_boundaries_blocks[1] = num_blocks;
+    return estimator_boundaries_blocks;
+  }
+  RTC_DCHECK_GT(estimator_boundaries_blocks.size(), 2);
+  const std::vector<size_t> section_sizes =
+      DefineFilterSectionSizes(delay_headroom_blocks, num_blocks,
+                               estimator_boundaries_blocks.size() - 1);
+
+  size_t idx = 0;
+  size_t current_size_block = 0;
+  RTC_DCHECK_EQ(section_sizes.size() + 1, estimator_boundaries_blocks.size());
+  estimator_boundaries_blocks[0] = delay_headroom_blocks;
+  for (size_t k = delay_headroom_blocks; k < num_blocks; ++k) {
+    current_size_block++;
+    if (current_size_block >= section_sizes[idx]) {
+      idx = idx + 1;
+      if (idx == section_sizes.size()) {
+        break;
+      }
+      estimator_boundaries_blocks[idx] = k + 1;
+      current_size_block = 0;
+    }
+  }
+  estimator_boundaries_blocks[section_sizes.size()] = num_blocks;
+  return estimator_boundaries_blocks;
+}
+
+std::array<float, SignalDependentErleEstimator::kSubbands>
+SetMaxErleSubbands(float max_erle_l, float max_erle_h, size_t limit_subband_l) {
+  std::array<float, SignalDependentErleEstimator::kSubbands> max_erle;
+  std::fill(max_erle.begin(), max_erle.begin() + limit_subband_l, max_erle_l);
+  std::fill(max_erle.begin() + limit_subband_l, max_erle.end(), max_erle_h);
+  return max_erle;
+}
+
+}  // namespace
+
+SignalDependentErleEstimator::SignalDependentErleEstimator(
+    const EchoCanceller3Config& config)
+    : min_erle_(config.erle.min),
+      num_sections_(config.erle.num_sections),
+      num_blocks_(config.filter.main.length_blocks),
+      delay_headroom_blocks_(config.delay.delay_headroom_blocks),
+      band_to_subband_(FormSubbandMap()),
+      max_erle_(SetMaxErleSubbands(config.erle.max_l,
+                                   config.erle.max_h,
+                                   band_to_subband_[kFftLengthBy2 / 2])),
+      section_boundaries_blocks_(SetSectionsBoundaries(delay_headroom_blocks_,
+                                                       num_blocks_,
+                                                       num_sections_)),
+      S2_section_accum_(num_sections_),
+      erle_estimators_(num_sections_),
+      correction_factors_(num_sections_) {
+  RTC_DCHECK_LE(num_sections_, num_blocks_);
+  RTC_DCHECK_GE(num_sections_, 1);
+
+  Reset();
+}
+
+SignalDependentErleEstimator::~SignalDependentErleEstimator() = default;
+
+void SignalDependentErleEstimator::Reset() {
+  erle_.fill(min_erle_);
+  for (auto& erle : erle_estimators_) {
+    erle.fill(min_erle_);
+  }
+  erle_ref_.fill(min_erle_);
+  for (auto& factor : correction_factors_) {
+    factor.fill(1.0f);
+  }
+  num_updates_.fill(0);
+}
+
+// Updates the Erle estimate by analyzing the current input signals. It takes
+// the render buffer and the filter frequency response in order to do an
+// estimation of the number of sections of the linear filter that are needed
+// for getting the majority of the energy in the echo estimate. Based on that
+// number of sections, it updates the erle estimation by introducing a
+// correction factor to the erle that is given as an input to this method.
+void SignalDependentErleEstimator::Update(
+    const RenderBuffer& render_buffer,
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+        filter_frequency_response,
+    rtc::ArrayView<const float> X2,
+    rtc::ArrayView<const float> Y2,
+    rtc::ArrayView<const float> E2,
+    rtc::ArrayView<const float> average_erle,
+    bool converged_filter) {
+  RTC_DCHECK_GT(num_sections_, 1);
+
+  // Gets the number of filter sections that are needed for achieving 90 %
+  // of the power spectrum energy of the echo estimate.
+  std::array<size_t, kFftLengthBy2Plus1> n_active_sections;
+  ComputeNumberOfActiveFilterSections(render_buffer, filter_frequency_response,
+                                      n_active_sections);
+
+  if (converged_filter) {
+    // Updates the correction factor that is used for correcting the erle and
+    // adapting it to the particular characteristics of the input signal.
+    UpdateCorrectionFactors(X2, Y2, E2, n_active_sections);
+  }
+
+  // Applies the correction factor to the input erle for getting a more refined
+  // erle estimation for the current input signal.
+  for (size_t k = 0; k < kFftLengthBy2; ++k) {
+    float correction_factor =
+        correction_factors_[n_active_sections[k]][band_to_subband_[k]];
+    erle_[k] = rtc::SafeClamp(average_erle[k] * correction_factor, min_erle_,
+                              max_erle_[band_to_subband_[k]]);
+  }
+}
+
+void SignalDependentErleEstimator::Dump(
+    const std::unique_ptr<ApmDataDumper>& data_dumper) const {
+  for (auto& erle : erle_estimators_) {
+    data_dumper->DumpRaw("aec3_all_erle", erle);
+  }
+  data_dumper->DumpRaw("aec3_ref_erle", erle_ref_);
+  for (auto& factor : correction_factors_) {
+    data_dumper->DumpRaw("aec3_erle_correction_factor", factor);
+  }
+  data_dumper->DumpRaw("aec3_erle", erle_);
+}
+
+// Estimates for each band the smallest number of sections in the filter that
+// together constitute 90% of the estimated echo energy.
+void SignalDependentErleEstimator::ComputeNumberOfActiveFilterSections(
+    const RenderBuffer& render_buffer,
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+        filter_frequency_response,
+    rtc::ArrayView<size_t> n_active_filter_sections) {
+  RTC_DCHECK_GT(num_sections_, 1);
+  // Computes an approximation of the power spectrum if the filter would have
+  // been limited to a certain number of filter sections.
+  ComputeEchoEstimatePerFilterSection(render_buffer, filter_frequency_response);
+  // For each band, computes the number of filter sections that are needed for
+  // achieving 90 % of the energy in the echo estimate.
+  ComputeActiveFilterSections(n_active_filter_sections);
+}
+
+void SignalDependentErleEstimator::UpdateCorrectionFactors(
+    rtc::ArrayView<const float> X2,
+    rtc::ArrayView<const float> Y2,
+    rtc::ArrayView<const float> E2,
+    rtc::ArrayView<const size_t> n_active_sections) {
+  constexpr float kX2BandEnergyThreshold = 44015068.0f;
+  constexpr float kSmthConstantDecreases = 0.1f;
+  constexpr float kSmthConstantIncreases = kSmthConstantDecreases / 2.f;
+  auto subband_powers = [](rtc::ArrayView<const float> power_spectrum,
+                           rtc::ArrayView<float> power_spectrum_subbands) {
+    for (size_t subband = 0; subband < kSubbands; ++subband) {
+      RTC_DCHECK_LE(kBandBoundaries[subband + 1], power_spectrum.size());
+      power_spectrum_subbands[subband] = std::accumulate(
+          power_spectrum.begin() + kBandBoundaries[subband],
+          power_spectrum.begin() + kBandBoundaries[subband + 1], 0.f);
+    }
+  };
+
+  std::array<float, kSubbands> X2_subbands, E2_subbands, Y2_subbands;
+  subband_powers(X2, X2_subbands);
+  subband_powers(E2, E2_subbands);
+  subband_powers(Y2, Y2_subbands);
+  std::array<size_t, kSubbands> idx_subbands;
+  for (size_t subband = 0; subband < kSubbands; ++subband) {
+    // When aggregating the number of active sections in the filter for
+    // different bands we choose to take the minimum of all of them. As an
+    // example, if for one of the bands the direct path is the main
+    // contributor to the final echo estimate, we consider the direct path to
+    // also be the main contributor for the subband that contains that
+    // particular band. That aggregate number of sections will be later used as
+    // the identifier of the erle estimator that needs to be updated.
+    RTC_DCHECK_LE(kBandBoundaries[subband + 1], n_active_sections.size());
+    idx_subbands[subband] = *std::min_element(
+        n_active_sections.begin() + kBandBoundaries[subband],
+        n_active_sections.begin() + kBandBoundaries[subband + 1]);
+  }
+
+  std::array<float, kSubbands> new_erle;
+  std::array<bool, kSubbands> is_erle_updated;
+  is_erle_updated.fill(false);
+  new_erle.fill(0.f);
+  for (size_t subband = 0; subband < kSubbands; ++subband) {
+    if (X2_subbands[subband] > kX2BandEnergyThreshold &&
+        E2_subbands[subband] > 0) {
+      new_erle[subband] = Y2_subbands[subband] / E2_subbands[subband];
+      RTC_DCHECK_GT(new_erle[subband], 0);
+      is_erle_updated[subband] = true;
+      ++num_updates_[subband];
+    }
+  }
+
+  for (size_t subband = 0; subband < kSubbands; ++subband) {
+    const size_t idx = idx_subbands[subband];
+    RTC_DCHECK_LT(idx, erle_estimators_.size());
+    float alpha = new_erle[subband] > erle_estimators_[idx][subband]
+                      ? kSmthConstantIncreases
+                      : kSmthConstantDecreases;
+    alpha = static_cast<float>(is_erle_updated[subband]) * alpha;
+    erle_estimators_[idx][subband] +=
+        alpha * (new_erle[subband] - erle_estimators_[idx][subband]);
+    erle_estimators_[idx][subband] = rtc::SafeClamp(
+        erle_estimators_[idx][subband], min_erle_, max_erle_[subband]);
+  }
+
+  for (size_t subband = 0; subband < kSubbands; ++subband) {
+    float alpha = new_erle[subband] > erle_ref_[subband]
+                      ? kSmthConstantIncreases
+                      : kSmthConstantDecreases;
+    alpha = static_cast<float>(is_erle_updated[subband]) * alpha;
+    erle_ref_[subband] += alpha * (new_erle[subband] - erle_ref_[subband]);
+    erle_ref_[subband] =
+        rtc::SafeClamp(erle_ref_[subband], min_erle_, max_erle_[subband]);
+  }
+
+  for (size_t subband = 0; subband < kSubbands; ++subband) {
+    constexpr int kNumUpdateThr = 50;
+    if (is_erle_updated[subband] && num_updates_[subband] > kNumUpdateThr) {
+      const size_t idx = idx_subbands[subband];
+      RTC_DCHECK_GT(erle_ref_[subband], 0.f);
+      // Computes the ratio between the erle that is updated using all the
+      // points and the erle that is updated only on signals that share the
+      // same number of active filter sections.
+      float new_correction_factor =
+          erle_estimators_[idx][subband] / erle_ref_[subband];
+
+      correction_factors_[idx][subband] +=
+          0.1f * (new_correction_factor - correction_factors_[idx][subband]);
+    }
+  }
+}
+
+void SignalDependentErleEstimator::ComputeEchoEstimatePerFilterSection(
+    const RenderBuffer& render_buffer,
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+        filter_frequency_response) {
+  const VectorBuffer& spectrum_render_buffer =
+      render_buffer.GetSpectrumBuffer();
+
+  RTC_DCHECK_EQ(S2_section_accum_.size() + 1,
+                section_boundaries_blocks_.size());
+  size_t idx_render = render_buffer.Position();
+  idx_render = spectrum_render_buffer.OffsetIndex(
+      idx_render, section_boundaries_blocks_[0]);
+
+  for (size_t section = 0; section < num_sections_; ++section) {
+    std::array<float, kFftLengthBy2Plus1> X2_section;
+    std::array<float, kFftLengthBy2Plus1> H2_section;
+    X2_section.fill(0.f);
+    H2_section.fill(0.f);
+    for (size_t block = section_boundaries_blocks_[section];
+         block < section_boundaries_blocks_[section + 1]; ++block) {
+      std::transform(X2_section.begin(), X2_section.end(),
+                     spectrum_render_buffer.buffer[idx_render].begin(),
+                     X2_section.begin(), std::plus<float>());
+      std::transform(H2_section.begin(), H2_section.end(),
+                     filter_frequency_response[block].begin(),
+                     H2_section.begin(), std::plus<float>());
+      idx_render = spectrum_render_buffer.IncIndex(idx_render);
+    }
+
+    std::transform(X2_section.begin(), X2_section.end(), H2_section.begin(),
+                   S2_section_accum_[section].begin(),
+                   std::multiplies<float>());
+  }
+
+  for (size_t section = 1; section < num_sections_; ++section) {
+    std::transform(S2_section_accum_[section - 1].begin(),
+                   S2_section_accum_[section - 1].end(),
+                   S2_section_accum_[section].begin(),
+                   S2_section_accum_[section].begin(), std::plus<float>());
+  }
+}
+
+void SignalDependentErleEstimator::ComputeActiveFilterSections(
+    rtc::ArrayView<size_t> number_active_filter_sections) const {
+  std::fill(number_active_filter_sections.begin(),
+            number_active_filter_sections.end(), 0);
+  for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+    size_t section = num_sections_;
+    float target = 0.9f * S2_section_accum_[num_sections_ - 1][k];
+    while (section > 0 && S2_section_accum_[section - 1][k] >= target) {
+      number_active_filter_sections[k] = --section;
+    }
+  }
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/signal_dependent_erle_estimator.h b/modules/audio_processing/aec3/signal_dependent_erle_estimator.h
new file mode 100644
index 0000000..d8b56c2
--- /dev/null
+++ b/modules/audio_processing/aec3/signal_dependent_erle_estimator.h
@@ -0,0 +1,93 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SIGNAL_DEPENDENT_ERLE_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SIGNAL_DEPENDENT_ERLE_ESTIMATOR_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+
+namespace webrtc {
+
+// This class estimates the dependency of the Erle on the input signal. By
+// looking at the input signal, an estimation on whether the current echo
+// estimate is due to the direct path or to a more reverberant one is performed.
+// Once that estimation is done, it is possible to refine the average Erle that
+// this class receives as an input.
+class SignalDependentErleEstimator {
+ public:
+  explicit SignalDependentErleEstimator(const EchoCanceller3Config& config);
+
+  ~SignalDependentErleEstimator();
+
+  void Reset();
+
+  // Returns the Erle per frequency subband.
+  const std::array<float, kFftLengthBy2Plus1>& Erle() const { return erle_; }
+
+  // Updates the Erle estimate. The Erle that is passed as an input is required
+  // to be an estimation of the average Erle achieved by the linear filter.
+  void Update(const RenderBuffer& render_buffer,
+              const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+                  filter_frequency_response,
+              rtc::ArrayView<const float> X2,
+              rtc::ArrayView<const float> Y2,
+              rtc::ArrayView<const float> E2,
+              rtc::ArrayView<const float> average_erle,
+              bool converged_filter);
+
+  void Dump(const std::unique_ptr<ApmDataDumper>& data_dumper) const;
+
+  static constexpr size_t kSubbands = 6;
+
+ private:
+  void ComputeNumberOfActiveFilterSections(
+      const RenderBuffer& render_buffer,
+      const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+          filter_frequency_response,
+      rtc::ArrayView<size_t> n_active_filter_sections);
+
+  void UpdateCorrectionFactors(rtc::ArrayView<const float> X2,
+                               rtc::ArrayView<const float> Y2,
+                               rtc::ArrayView<const float> E2,
+                               rtc::ArrayView<const size_t> n_active_sections);
+
+  void ComputeEchoEstimatePerFilterSection(
+      const RenderBuffer& render_buffer,
+      const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+          filter_frequency_response);
+
+  void ComputeActiveFilterSections(
+      rtc::ArrayView<size_t> number_active_filter_sections) const;
+
+  const float min_erle_;
+  const size_t num_sections_;
+  const size_t num_blocks_;
+  const size_t delay_headroom_blocks_;
+  const std::array<size_t, kFftLengthBy2Plus1> band_to_subband_;
+  const std::array<float, kSubbands> max_erle_;
+  const std::vector<size_t> section_boundaries_blocks_;
+  std::array<float, kFftLengthBy2Plus1> erle_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> S2_section_accum_;
+  std::vector<std::array<float, kSubbands>> erle_estimators_;
+  std::array<float, kSubbands> erle_ref_;
+  std::vector<std::array<float, kSubbands>> correction_factors_;
+  std::array<int, kSubbands> num_updates_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_SIGNAL_DEPENDENT_ERLE_ESTIMATOR_H_
diff --git a/modules/audio_processing/aec3/signal_dependent_erle_estimator_unittest.cc b/modules/audio_processing/aec3/signal_dependent_erle_estimator_unittest.cc
new file mode 100644
index 0000000..aec605f
--- /dev/null
+++ b/modules/audio_processing/aec3/signal_dependent_erle_estimator_unittest.cc
@@ -0,0 +1,155 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/signal_dependent_erle_estimator.h"
+
+#include <algorithm>
+#include <iostream>
+#include <string>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+void GetActiveFrame(rtc::ArrayView<float> x) {
+  const std::array<float, kBlockSize> frame = {
+      7459.88, 17209.6, 17383,   20768.9, 16816.7, 18386.3, 4492.83, 9675.85,
+      6665.52, 14808.6, 9342.3,  7483.28, 19261.7, 4145.98, 1622.18, 13475.2,
+      7166.32, 6856.61, 21937,   7263.14, 9569.07, 14919,   8413.32, 7551.89,
+      7848.65, 6011.27, 13080.6, 15865.2, 12656,   17459.6, 4263.93, 4503.03,
+      9311.79, 21095.8, 12657.9, 13906.6, 19267.2, 11338.1, 16828.9, 11501.6,
+      11405,   15031.4, 14541.6, 19765.5, 18346.3, 19350.2, 3157.47, 18095.8,
+      1743.68, 21328.2, 19727.5, 7295.16, 10332.4, 11055.5, 20107.4, 14708.4,
+      12416.2, 16434,   2454.69, 9840.8,  6867.23, 1615.75, 6059.9,  8394.19};
+  RTC_DCHECK_GE(x.size(), frame.size());
+  std::copy(frame.begin(), frame.end(), x.begin());
+}
+
+class TestInputs {
+ public:
+  explicit TestInputs(const EchoCanceller3Config& cfg);
+  ~TestInputs();
+  const RenderBuffer& GetRenderBuffer() { return *render_buffer_; }
+  rtc::ArrayView<const float> GetX2() { return X2_; }
+  rtc::ArrayView<const float> GetY2() { return Y2_; }
+  rtc::ArrayView<const float> GetE2() { return E2_; }
+  std::vector<std::array<float, kFftLengthBy2Plus1>> GetH2() { return H2_; }
+  void Update();
+
+ private:
+  void UpdateCurrentPowerSpectra();
+  int n_ = 0;
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer_;
+  RenderBuffer* render_buffer_;
+  std::array<float, kFftLengthBy2Plus1> X2_;
+  std::array<float, kFftLengthBy2Plus1> Y2_;
+  std::array<float, kFftLengthBy2Plus1> E2_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> H2_;
+  std::vector<std::vector<float>> x_;
+};
+
+TestInputs::TestInputs(const EchoCanceller3Config& cfg)
+    : render_delay_buffer_(RenderDelayBuffer::Create2(cfg, 1)),
+      H2_(cfg.filter.main.length_blocks),
+      x_(1, std::vector<float>(kBlockSize, 0.f)) {
+  render_delay_buffer_->SetDelay(4);
+  render_buffer_ = render_delay_buffer_->GetRenderBuffer();
+  for (auto& H : H2_) {
+    H.fill(0.f);
+  }
+  H2_[0].fill(1.0f);
+}
+
+TestInputs::~TestInputs() = default;
+
+void TestInputs::Update() {
+  if (n_ % 2 == 0) {
+    std::fill(x_[0].begin(), x_[0].end(), 0.f);
+  } else {
+    GetActiveFrame(x_[0]);
+  }
+
+  render_delay_buffer_->Insert(x_);
+  render_delay_buffer_->PrepareCaptureProcessing();
+  UpdateCurrentPowerSpectra();
+  ++n_;
+}
+
+void TestInputs::UpdateCurrentPowerSpectra() {
+  const VectorBuffer& spectrum_render_buffer =
+      render_buffer_->GetSpectrumBuffer();
+  size_t idx = render_buffer_->Position();
+  size_t prev_idx = spectrum_render_buffer.OffsetIndex(idx, 1);
+  auto& X2 = spectrum_render_buffer.buffer[idx];
+  auto& X2_prev = spectrum_render_buffer.buffer[prev_idx];
+  std::copy(X2.begin(), X2.end(), X2_.begin());
+  RTC_DCHECK_EQ(X2.size(), Y2_.size());
+  for (size_t k = 0; k < X2.size(); ++k) {
+    E2_[k] = 0.01f * X2_prev[k];
+    Y2_[k] = X2[k] + E2_[k];
+  }
+}
+
+}  // namespace
+
+TEST(SignalDependentErleEstimator, SweepSettings) {
+  EchoCanceller3Config cfg;
+  size_t max_length_blocks = 50;
+  for (size_t blocks = 0; blocks < max_length_blocks; blocks = blocks + 10) {
+    for (size_t delay_headroom = 0; delay_headroom < 5; ++delay_headroom) {
+      for (size_t num_sections = 2; num_sections < max_length_blocks;
+           ++num_sections) {
+        cfg.filter.main.length_blocks = blocks;
+        cfg.filter.main_initial.length_blocks =
+            std::min(cfg.filter.main_initial.length_blocks, blocks);
+        cfg.delay.delay_headroom_blocks = delay_headroom;
+        cfg.erle.num_sections = num_sections;
+        if (EchoCanceller3Config::Validate(&cfg)) {
+          SignalDependentErleEstimator s(cfg);
+          std::array<float, kFftLengthBy2Plus1> average_erle;
+          average_erle.fill(cfg.erle.max_l);
+          TestInputs inputs(cfg);
+          for (size_t n = 0; n < 10; ++n) {
+            inputs.Update();
+            s.Update(inputs.GetRenderBuffer(), inputs.GetH2(), inputs.GetX2(),
+                     inputs.GetY2(), inputs.GetE2(), average_erle, true);
+          }
+        }
+      }
+    }
+  }
+}
+
+TEST(SignalDependentErleEstimator, LongerRun) {
+  EchoCanceller3Config cfg;
+  cfg.filter.main.length_blocks = 2;
+  cfg.filter.main_initial.length_blocks = 1;
+  cfg.delay.delay_headroom_blocks = 0;
+  cfg.delay.hysteresis_limit_1_blocks = 0;
+  cfg.erle.num_sections = 2;
+  EXPECT_EQ(EchoCanceller3Config::Validate(&cfg), true);
+  std::array<float, kFftLengthBy2Plus1> average_erle;
+  average_erle.fill(cfg.erle.max_l);
+  SignalDependentErleEstimator s(cfg);
+  TestInputs inputs(cfg);
+  for (size_t n = 0; n < 200; ++n) {
+    inputs.Update();
+    s.Update(inputs.GetRenderBuffer(), inputs.GetH2(), inputs.GetX2(),
+             inputs.GetY2(), inputs.GetE2(), average_erle, true);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/subband_erle_estimator.cc b/modules/audio_processing/aec3/subband_erle_estimator.cc
index 2cb5acc..9453e57 100644
--- a/modules/audio_processing/aec3/subband_erle_estimator.cc
+++ b/modules/audio_processing/aec3/subband_erle_estimator.cc
@@ -11,12 +11,8 @@
 #include "modules/audio_processing/aec3/subband_erle_estimator.h"
 
 #include <algorithm>
-#include <memory>
+#include <functional>
 
-#include "absl/types/optional.h"
-#include "api/array_view.h"
-#include "modules/audio_processing/aec3/aec3_common.h"
-#include "modules/audio_processing/logging/apm_data_dumper.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/numerics/safe_minmax.h"
 #include "system_wrappers/include/field_trial.h"
@@ -24,23 +20,29 @@
 namespace webrtc {
 
 namespace {
-constexpr int kPointsToAccumulate = 6;
+
 constexpr float kX2BandEnergyThreshold = 44015068.0f;
-constexpr int kErleHold = 100;
-constexpr int kBlocksForOnsetDetection = kErleHold + 150;
+constexpr int kBlocksToHoldErle = 100;
+constexpr int kBlocksForOnsetDetection = kBlocksToHoldErle + 150;
+constexpr int kPointsToAccumulate = 6;
 
 bool EnableAdaptErleOnLowRender() {
   return !field_trial::IsEnabled("WebRTC-Aec3AdaptErleOnLowRenderKillSwitch");
 }
 
+std::array<float, kFftLengthBy2Plus1> SetMaxErleBands(float max_erle_l,
+                                                      float max_erle_h) {
+  std::array<float, kFftLengthBy2Plus1> max_erle;
+  std::fill(max_erle.begin(), max_erle.begin() + kFftLengthBy2 / 2, max_erle_l);
+  std::fill(max_erle.begin() + kFftLengthBy2 / 2, max_erle.end(), max_erle_h);
+  return max_erle;
+}
+
 }  // namespace
 
-SubbandErleEstimator::SubbandErleEstimator(float min_erle,
-                                           float max_erle_lf,
-                                           float max_erle_hf)
-    : min_erle_(min_erle),
-      max_erle_lf_(max_erle_lf),
-      max_erle_hf_(max_erle_hf),
+SubbandErleEstimator::SubbandErleEstimator(const EchoCanceller3Config& config)
+    : min_erle_(config.erle.min),
+      max_erle_(SetMaxErleBands(config.erle.max_l, config.erle.max_h)),
       adapt_on_low_render_(EnableAdaptErleOnLowRender()) {
   Reset();
 }
@@ -50,8 +52,9 @@
 void SubbandErleEstimator::Reset() {
   erle_.fill(min_erle_);
   erle_onsets_.fill(min_erle_);
-  hold_counters_.fill(0);
   coming_onset_.fill(true);
+  hold_counters_.fill(0);
+  ResetAccumulatedSpectra();
 }
 
 void SubbandErleEstimator::Update(rtc::ArrayView<const float> X2,
@@ -63,10 +66,8 @@
     // Note that the use of the converged_filter flag already imposed
     // a minimum of the erle that can be estimated as that flag would
     // be false if the filter is performing poorly.
-    constexpr size_t kFftLengthBy4 = kFftLengthBy2 / 2;
-    UpdateBands(X2, Y2, E2, 1, kFftLengthBy4, max_erle_lf_, onset_detection);
-    UpdateBands(X2, Y2, E2, kFftLengthBy4, kFftLengthBy2, max_erle_hf_,
-                onset_detection);
+    UpdateAccumulatedSpectra(X2, Y2, E2);
+    UpdateBands(onset_detection);
   }
 
   if (onset_detection) {
@@ -79,61 +80,53 @@
 
 void SubbandErleEstimator::Dump(
     const std::unique_ptr<ApmDataDumper>& data_dumper) const {
-  data_dumper->DumpRaw("aec3_erle", Erle());
   data_dumper->DumpRaw("aec3_erle_onset", ErleOnsets());
 }
 
-void SubbandErleEstimator::UpdateBands(rtc::ArrayView<const float> X2,
-                                       rtc::ArrayView<const float> Y2,
-                                       rtc::ArrayView<const float> E2,
-                                       size_t start,
-                                       size_t stop,
-                                       float max_erle,
-                                       bool onset_detection) {
-  auto erle_band_update = [](float erle_band, float new_erle,
-                             bool low_render_energy, float alpha_inc,
-                             float alpha_dec, float min_erle, float max_erle) {
-    if (new_erle < erle_band && low_render_energy) {
-      // Decreases are not allowed if low render energy signals were used for
-      // the erle computation.
-      return erle_band;
+void SubbandErleEstimator::UpdateBands(bool onset_detection) {
+  std::array<float, kFftLengthBy2> new_erle;
+  std::array<bool, kFftLengthBy2> is_erle_updated;
+  is_erle_updated.fill(false);
+
+  for (size_t k = 1; k < kFftLengthBy2; ++k) {
+    if (accum_spectra_.num_points_[k] == kPointsToAccumulate &&
+        accum_spectra_.E2_[k] > 0.f) {
+      new_erle[k] = accum_spectra_.Y2_[k] / accum_spectra_.E2_[k];
+      is_erle_updated[k] = true;
     }
-    float alpha = new_erle > erle_band ? alpha_inc : alpha_dec;
-    float erle_band_out = erle_band;
-    erle_band_out = erle_band + alpha * (new_erle - erle_band);
-    erle_band_out = rtc::SafeClamp(erle_band_out, min_erle, max_erle);
-    return erle_band_out;
-  };
+  }
 
-  for (size_t k = start; k < stop; ++k) {
-    if (adapt_on_low_render_ || X2[k] > kX2BandEnergyThreshold) {
-      bool low_render_energy = false;
-      absl::optional<float> new_erle = instantaneous_erle_.Update(
-          X2[k], Y2[k], E2[k], k, &low_render_energy);
-      if (new_erle) {
-        RTC_DCHECK(adapt_on_low_render_ || !low_render_energy);
-        if (onset_detection && !low_render_energy) {
-          if (coming_onset_[k]) {
-            coming_onset_[k] = false;
-            erle_onsets_[k] = erle_band_update(
-                erle_onsets_[k], new_erle.value(), low_render_energy, 0.15f,
-                0.3f, min_erle_, max_erle);
-          }
-          hold_counters_[k] = kBlocksForOnsetDetection;
+  if (onset_detection) {
+    for (size_t k = 1; k < kFftLengthBy2; ++k) {
+      if (is_erle_updated[k] && !accum_spectra_.low_render_energy_[k]) {
+        if (coming_onset_[k]) {
+          coming_onset_[k] = false;
+          float alpha = new_erle[k] < erle_onsets_[k] ? 0.3f : 0.15f;
+          erle_onsets_[k] = rtc::SafeClamp(
+              erle_onsets_[k] + alpha * (new_erle[k] - erle_onsets_[k]),
+              min_erle_, max_erle_[k]);
         }
-
-        erle_[k] =
-            erle_band_update(erle_[k], new_erle.value(), low_render_energy,
-                             0.05f, 0.1f, min_erle_, max_erle);
+        hold_counters_[k] = kBlocksForOnsetDetection;
       }
     }
   }
+
+  for (size_t k = 1; k < kFftLengthBy2; ++k) {
+    if (is_erle_updated[k]) {
+      float alpha = 0.05f;
+      if (new_erle[k] < erle_[k]) {
+        alpha = accum_spectra_.low_render_energy_[k] ? 0.f : 0.1f;
+      }
+      erle_[k] = rtc::SafeClamp(erle_[k] + alpha * (new_erle[k] - erle_[k]),
+                                min_erle_, max_erle_[k]);
+    }
+  }
 }
 
 void SubbandErleEstimator::DecreaseErlePerBandForLowRenderSignals() {
   for (size_t k = 1; k < kFftLengthBy2; ++k) {
     hold_counters_[k]--;
-    if (hold_counters_[k] <= (kBlocksForOnsetDetection - kErleHold)) {
+    if (hold_counters_[k] <= (kBlocksForOnsetDetection - kBlocksToHoldErle)) {
       if (erle_[k] > erle_onsets_[k]) {
         erle_[k] = std::max(erle_onsets_[k], 0.97f * erle_[k]);
         RTC_DCHECK_LE(min_erle_, erle_[k]);
@@ -146,43 +139,55 @@
   }
 }
 
-SubbandErleEstimator::ErleInstantaneous::ErleInstantaneous() {
-  Reset();
+void SubbandErleEstimator::ResetAccumulatedSpectra() {
+  accum_spectra_.Y2_.fill(0.f);
+  accum_spectra_.E2_.fill(0.f);
+  accum_spectra_.num_points_.fill(0);
+  accum_spectra_.low_render_energy_.fill(false);
 }
 
-SubbandErleEstimator::ErleInstantaneous::~ErleInstantaneous() = default;
-
-absl::optional<float> SubbandErleEstimator::ErleInstantaneous::Update(
-    float X2,
-    float Y2,
-    float E2,
-    size_t band,
-    bool* low_render_energy) {
-  absl::optional<float> erle_instantaneous = absl::nullopt;
-  RTC_DCHECK_LT(band, kFftLengthBy2Plus1);
-  Y2_acum_[band] += Y2;
-  E2_acum_[band] += E2;
-  low_render_energy_[band] =
-      low_render_energy_[band] || X2 < kX2BandEnergyThreshold;
-  if (++num_points_[band] == kPointsToAccumulate) {
-    if (E2_acum_[band]) {
-      erle_instantaneous = Y2_acum_[band] / E2_acum_[band];
+void SubbandErleEstimator::UpdateAccumulatedSpectra(
+    rtc::ArrayView<const float> X2,
+    rtc::ArrayView<const float> Y2,
+    rtc::ArrayView<const float> E2) {
+  auto& st = accum_spectra_;
+  if (adapt_on_low_render_) {
+    if (st.num_points_[0] == kPointsToAccumulate) {
+      st.num_points_[0] = 0;
+      st.Y2_.fill(0.f);
+      st.E2_.fill(0.f);
+      st.low_render_energy_.fill(false);
     }
-    *low_render_energy = low_render_energy_[band];
-    num_points_[band] = 0;
-    Y2_acum_[band] = 0.f;
-    E2_acum_[band] = 0.f;
-    low_render_energy_[band] = false;
+    std::transform(Y2.begin(), Y2.end(), st.Y2_.begin(), st.Y2_.begin(),
+                   std::plus<float>());
+    std::transform(E2.begin(), E2.end(), st.E2_.begin(), st.E2_.begin(),
+                   std::plus<float>());
+
+    for (size_t k = 0; k < X2.size(); ++k) {
+      st.low_render_energy_[k] =
+          st.low_render_energy_[k] || X2[k] < kX2BandEnergyThreshold;
+    }
+    st.num_points_[0]++;
+    st.num_points_.fill(st.num_points_[0]);
+
+  } else {
+    // The update is always done using high render energy signals and
+    // therefore the field accum_spectra_.low_render_energy_ does not need to
+    // be modified.
+    for (size_t k = 0; k < X2.size(); ++k) {
+      if (X2[k] > kX2BandEnergyThreshold) {
+        if (st.num_points_[k] == kPointsToAccumulate) {
+          st.Y2_[k] = 0.f;
+          st.E2_[k] = 0.f;
+          st.num_points_[k] = 0;
+        }
+        st.Y2_[k] += Y2[k];
+        st.E2_[k] += E2[k];
+        st.num_points_[k]++;
+      }
+      RTC_DCHECK_EQ(st.low_render_energy_[k], false);
+    }
   }
-
-  return erle_instantaneous;
-}
-
-void SubbandErleEstimator::ErleInstantaneous::Reset() {
-  Y2_acum_.fill(0.f);
-  E2_acum_.fill(0.f);
-  low_render_energy_.fill(false);
-  num_points_.fill(0);
 }
 
 }  // namespace webrtc
diff --git a/modules/audio_processing/aec3/subband_erle_estimator.h b/modules/audio_processing/aec3/subband_erle_estimator.h
index 7693b6a..b9862db 100644
--- a/modules/audio_processing/aec3/subband_erle_estimator.h
+++ b/modules/audio_processing/aec3/subband_erle_estimator.h
@@ -14,9 +14,10 @@
 #include <stddef.h>
 #include <array>
 #include <memory>
+#include <vector>
 
-#include "absl/types/optional.h"
 #include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
 #include "modules/audio_processing/aec3/aec3_common.h"
 #include "modules/audio_processing/logging/apm_data_dumper.h"
 
@@ -25,7 +26,7 @@
 // Estimates the echo return loss enhancement for each frequency subband.
 class SubbandErleEstimator {
  public:
-  SubbandErleEstimator(float min_erle, float max_erle_lf, float max_erle_hf);
+  explicit SubbandErleEstimator(const EchoCanceller3Config& config);
   ~SubbandErleEstimator();
 
   // Resets the ERLE estimator.
@@ -42,55 +43,35 @@
   const std::array<float, kFftLengthBy2Plus1>& Erle() const { return erle_; }
 
   // Returns the ERLE estimate at onsets.
-  const std::array<float, kFftLengthBy2Plus1>& ErleOnsets() const {
-    return erle_onsets_;
-  }
+  rtc::ArrayView<const float> ErleOnsets() const { return erle_onsets_; }
 
   void Dump(const std::unique_ptr<ApmDataDumper>& data_dumper) const;
 
  private:
-  void UpdateBands(rtc::ArrayView<const float> X2,
-                   rtc::ArrayView<const float> Y2,
-                   rtc::ArrayView<const float> E2,
-                   size_t start,
-                   size_t stop,
-                   float max_erle,
-                   bool onset_detection);
-  void DecreaseErlePerBandForLowRenderSignals();
-
-  class ErleInstantaneous {
-   public:
-    ErleInstantaneous();
-    ~ErleInstantaneous();
-    // Updates the ERLE for a band with a new block. Returns absl::nullopt
-    // if not enough points were accumulated for doing the estimation,
-    // otherwise, it returns the ERLE. When the ERLE is returned, the
-    // low_render_energy flag contains information on whether the estimation was
-    // done using low level render signals.
-    absl::optional<float> Update(float X2,
-                                 float Y2,
-                                 float E2,
-                                 size_t band,
-                                 bool* low_render_energy);
-    // Resets the ERLE estimator to its initial state.
-    void Reset();
-
-   private:
-    std::array<float, kFftLengthBy2Plus1> Y2_acum_;
-    std::array<float, kFftLengthBy2Plus1> E2_acum_;
+  struct AccumulatedSpectra {
+    std::array<float, kFftLengthBy2Plus1> Y2_;
+    std::array<float, kFftLengthBy2Plus1> E2_;
     std::array<bool, kFftLengthBy2Plus1> low_render_energy_;
     std::array<int, kFftLengthBy2Plus1> num_points_;
   };
 
-  ErleInstantaneous instantaneous_erle_;
+  void UpdateAccumulatedSpectra(rtc::ArrayView<const float> X2,
+                                rtc::ArrayView<const float> Y2,
+                                rtc::ArrayView<const float> E2);
+
+  void ResetAccumulatedSpectra();
+
+  void UpdateBands(bool onset_detection);
+  void DecreaseErlePerBandForLowRenderSignals();
+
+  const float min_erle_;
+  const std::array<float, kFftLengthBy2Plus1> max_erle_;
+  const bool adapt_on_low_render_;
+  AccumulatedSpectra accum_spectra_;
   std::array<float, kFftLengthBy2Plus1> erle_;
   std::array<float, kFftLengthBy2Plus1> erle_onsets_;
   std::array<bool, kFftLengthBy2Plus1> coming_onset_;
   std::array<int, kFftLengthBy2Plus1> hold_counters_;
-  const float min_erle_;
-  const float max_erle_lf_;
-  const float max_erle_hf_;
-  const bool adapt_on_low_render_;
 };
 
 }  // namespace webrtc
diff --git a/modules/audio_processing/aec3/suppression_gain.cc b/modules/audio_processing/aec3/suppression_gain.cc
index 88cfc0a..c6d2bf6 100644
--- a/modules/audio_processing/aec3/suppression_gain.cc
+++ b/modules/audio_processing/aec3/suppression_gain.cc
@@ -419,7 +419,7 @@
   // Detect strong active nearend if the nearend is sufficiently stronger than
   // the echo and the nearend noise.
   if ((!initial_state || use_during_initial_phase_) &&
-      ne_sum > enr_threshold_ * echo_sum &&
+      echo_sum < enr_threshold_ * ne_sum &&
       ne_sum > snr_threshold_ * noise_sum) {
     if (++trigger_counter_ >= trigger_threshold_) {
       // After a period of strong active nearend activity, flag nearend mode.
@@ -432,7 +432,7 @@
   }
 
   // Exit nearend-state early at strong echo.
-  if (ne_sum < enr_exit_threshold_ * echo_sum &&
+  if (echo_sum > enr_exit_threshold_ * ne_sum &&
       echo_sum > snr_threshold_ * noise_sum) {
     hold_counter_ = 0;
   }
diff --git a/modules/audio_processing/agc2/BUILD.gn b/modules/audio_processing/agc2/BUILD.gn
index 18f2d78..5431a15 100644
--- a/modules/audio_processing/agc2/BUILD.gn
+++ b/modules/audio_processing/agc2/BUILD.gn
@@ -27,6 +27,7 @@
     ":gain_applier",
     ":noise_level_estimator",
     ":rnn_vad_with_level",
+    "..:api",
     "..:apm_logging",
     "..:audio_frame_view",
     "../../../api:array_view",
@@ -58,6 +59,7 @@
     ":gain_applier",
     ":noise_level_estimator",
     ":rnn_vad_with_level",
+    "..:api",
     "..:apm_logging",
     "..:audio_frame_view",
     "../../../api:array_view",
@@ -190,8 +192,8 @@
     "../../../api:array_view",
     "../../../common_audio",
     "../../../rtc_base:checks",
+    "../../../rtc_base:gunit_helpers",
     "../../../rtc_base:rtc_base_approved",
-    "../../../rtc_base:rtc_base_tests_utils",
   ]
 }
 
@@ -202,7 +204,7 @@
   ]
   deps = [
     ":biquad_filter",
-    "../../../rtc_base:rtc_base_tests_utils",
+    "../../../rtc_base:gunit_helpers",
   ]
 }
 
@@ -230,8 +232,8 @@
     "../../../api:array_view",
     "../../../common_audio",
     "../../../rtc_base:checks",
+    "../../../rtc_base:gunit_helpers",
     "../../../rtc_base:rtc_base_approved",
-    "../../../rtc_base:rtc_base_tests_utils",
     "../../../system_wrappers:metrics",
     "//third_party/abseil-cpp/absl/memory",
   ]
@@ -252,8 +254,20 @@
     "..:audio_frame_view",
     "../../../api:array_view",
     "../../../rtc_base:checks",
+    "../../../rtc_base:gunit_helpers",
     "../../../rtc_base:rtc_base_approved",
-    "../../../rtc_base:rtc_base_tests_utils",
+  ]
+}
+
+rtc_source_set("rnn_vad_with_level_unittests") {
+  testonly = true
+  sources = [
+    "vad_with_level_unittest.cc",
+  ]
+  deps = [
+    ":rnn_vad_with_level",
+    "..:audio_frame_view",
+    "../../../rtc_base:gunit_helpers",
   ]
 }
 
diff --git a/modules/audio_processing/agc2/adaptive_agc.cc b/modules/audio_processing/agc2/adaptive_agc.cc
index 795b8b5..a5d3608 100644
--- a/modules/audio_processing/agc2/adaptive_agc.cc
+++ b/modules/audio_processing/agc2/adaptive_agc.cc
@@ -26,8 +26,12 @@
 }
 
 AdaptiveAgc::AdaptiveAgc(ApmDataDumper* apm_data_dumper,
-                         float extra_saturation_margin_db)
-    : speech_level_estimator_(apm_data_dumper, extra_saturation_margin_db),
+                         const AudioProcessing::Config::GainController2& config)
+    : speech_level_estimator_(
+          apm_data_dumper,
+          config.adaptive_digital.level_estimator,
+          config.adaptive_digital.use_saturation_protector,
+          config.adaptive_digital.extra_saturation_margin_db),
       gain_applier_(apm_data_dumper),
       apm_data_dumper_(apm_data_dumper),
       noise_level_estimator_(apm_data_dumper) {
@@ -44,9 +48,9 @@
                             signal_with_levels.vad_result.speech_probability);
   apm_data_dumper_->DumpRaw("agc2_vad_rms_dbfs",
                             signal_with_levels.vad_result.speech_rms_dbfs);
-
   apm_data_dumper_->DumpRaw("agc2_vad_peak_dbfs",
                             signal_with_levels.vad_result.speech_peak_dbfs);
+
   speech_level_estimator_.UpdateEstimation(signal_with_levels.vad_result);
 
   signal_with_levels.input_level_dbfs =
@@ -68,7 +72,6 @@
 
   // The gain applier applies the gain.
   gain_applier_.Process(signal_with_levels);
-  ;
 }
 
 void AdaptiveAgc::Reset() {
diff --git a/modules/audio_processing/agc2/adaptive_agc.h b/modules/audio_processing/agc2/adaptive_agc.h
index 6c0917a..16c0082 100644
--- a/modules/audio_processing/agc2/adaptive_agc.h
+++ b/modules/audio_processing/agc2/adaptive_agc.h
@@ -16,6 +16,7 @@
 #include "modules/audio_processing/agc2/noise_level_estimator.h"
 #include "modules/audio_processing/agc2/vad_with_level.h"
 #include "modules/audio_processing/include/audio_frame_view.h"
+#include "modules/audio_processing/include/audio_processing.h"
 
 namespace webrtc {
 class ApmDataDumper;
@@ -23,7 +24,8 @@
 class AdaptiveAgc {
  public:
   explicit AdaptiveAgc(ApmDataDumper* apm_data_dumper);
-  AdaptiveAgc(ApmDataDumper* apm_data_dumper, float extra_saturation_margin_db);
+  AdaptiveAgc(ApmDataDumper* apm_data_dumper,
+              const AudioProcessing::Config::GainController2& config);
   ~AdaptiveAgc();
 
   void Process(AudioFrameView<float> float_frame, float last_audio_level);
diff --git a/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc b/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc
index 138faec..8640324 100644
--- a/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc
+++ b/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc
@@ -19,13 +19,20 @@
 
 AdaptiveModeLevelEstimator::AdaptiveModeLevelEstimator(
     ApmDataDumper* apm_data_dumper)
-    : saturation_protector_(apm_data_dumper),
+    : level_estimator_(
+          AudioProcessing::Config::GainController2::LevelEstimator::kRms),
+      use_saturation_protector_(true),
+      saturation_protector_(apm_data_dumper),
       apm_data_dumper_(apm_data_dumper) {}
 
 AdaptiveModeLevelEstimator::AdaptiveModeLevelEstimator(
     ApmDataDumper* apm_data_dumper,
+    AudioProcessing::Config::GainController2::LevelEstimator level_estimator,
+    bool use_saturation_protector,
     float extra_saturation_margin_db)
-    : saturation_protector_(apm_data_dumper, extra_saturation_margin_db),
+    : level_estimator_(level_estimator),
+      use_saturation_protector_(use_saturation_protector),
+      saturation_protector_(apm_data_dumper, extra_saturation_margin_db),
       apm_data_dumper_(apm_data_dumper) {}
 
 void AdaptiveModeLevelEstimator::UpdateEstimation(
@@ -49,20 +56,38 @@
 
   const float leak_factor = buffer_is_full ? kFullBufferLeakFactor : 1.f;
 
+  // Read speech level estimation.
+  float speech_level_dbfs = 0.f;
+  using LevelEstimatorType =
+      AudioProcessing::Config::GainController2::LevelEstimator;
+  switch (level_estimator_) {
+    case LevelEstimatorType::kRms:
+      speech_level_dbfs = vad_data.speech_rms_dbfs;
+      break;
+    case LevelEstimatorType::kPeak:
+      speech_level_dbfs = vad_data.speech_peak_dbfs;
+      break;
+  }
+
+  // Update speech level estimation.
   estimate_numerator_ = estimate_numerator_ * leak_factor +
-                        vad_data.speech_rms_dbfs * vad_data.speech_probability;
+                        speech_level_dbfs * vad_data.speech_probability;
   estimate_denominator_ =
       estimate_denominator_ * leak_factor + vad_data.speech_probability;
-
   last_estimate_with_offset_dbfs_ = estimate_numerator_ / estimate_denominator_;
 
-  saturation_protector_.UpdateMargin(vad_data, last_estimate_with_offset_dbfs_);
-  DebugDumpEstimate();
+  if (use_saturation_protector_) {
+    saturation_protector_.UpdateMargin(vad_data,
+                                       last_estimate_with_offset_dbfs_);
+    DebugDumpEstimate();
+  }
 }
 
 float AdaptiveModeLevelEstimator::LatestLevelEstimate() const {
   return rtc::SafeClamp<float>(
-      last_estimate_with_offset_dbfs_ + saturation_protector_.LastMargin(),
+      last_estimate_with_offset_dbfs_ +
+          (use_saturation_protector_ ? saturation_protector_.LastMargin()
+                                     : 0.f),
       -90.f, 30.f);
 }
 
diff --git a/modules/audio_processing/agc2/adaptive_mode_level_estimator.h b/modules/audio_processing/agc2/adaptive_mode_level_estimator.h
index f887268..63b9de2 100644
--- a/modules/audio_processing/agc2/adaptive_mode_level_estimator.h
+++ b/modules/audio_processing/agc2/adaptive_mode_level_estimator.h
@@ -16,6 +16,7 @@
 #include "modules/audio_processing/agc2/agc2_common.h"  // kFullBufferSizeMs...
 #include "modules/audio_processing/agc2/saturation_protector.h"
 #include "modules/audio_processing/agc2/vad_with_level.h"
+#include "modules/audio_processing/include/audio_processing.h"
 
 namespace webrtc {
 class ApmDataDumper;
@@ -23,8 +24,11 @@
 class AdaptiveModeLevelEstimator {
  public:
   explicit AdaptiveModeLevelEstimator(ApmDataDumper* apm_data_dumper);
-  AdaptiveModeLevelEstimator(ApmDataDumper* apm_data_dumper,
-                             float extra_saturation_margin_db);
+  AdaptiveModeLevelEstimator(
+      ApmDataDumper* apm_data_dumper,
+      AudioProcessing::Config::GainController2::LevelEstimator level_estimator,
+      bool use_saturation_protector,
+      float extra_saturation_margin_db);
   void UpdateEstimation(const VadWithLevel::LevelAndProbability& vad_data);
   float LatestLevelEstimate() const;
   void Reset();
@@ -35,6 +39,9 @@
  private:
   void DebugDumpEstimate();
 
+  const AudioProcessing::Config::GainController2::LevelEstimator
+      level_estimator_;
+  const bool use_saturation_protector_;
   size_t buffer_size_ms_ = 0;
   float last_estimate_with_offset_dbfs_ = kInitialSpeechLevelEstimateDbfs;
   float estimate_numerator_ = 0.f;
diff --git a/modules/audio_processing/agc2/vad_with_level_unittest.cc b/modules/audio_processing/agc2/vad_with_level_unittest.cc
new file mode 100644
index 0000000..f9aee62
--- /dev/null
+++ b/modules/audio_processing/agc2/vad_with_level_unittest.cc
@@ -0,0 +1,40 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/vad_with_level.h"
+
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+namespace test {
+
+TEST(AutomaticGainController2VadWithLevelEstimator,
+     PeakLevelGreaterThanRmsLevel) {
+  constexpr size_t kSampleRateHz = 8000;
+
+  // 10 ms input frame, constant except for one peak value.
+  // Handcrafted so that the average is lower than the peak value.
+  std::array<float, kSampleRateHz / 100> frame;
+  frame.fill(1000.f);
+  frame[10] = 2000.f;
+  float* const channel0 = frame.data();
+  AudioFrameView<float> frame_view(&channel0, 1, frame.size());
+
+  // Compute audio frame levels (the VAD result is ignored).
+  VadWithLevel vad_with_level;
+  auto levels_and_vad_prob = vad_with_level.AnalyzeFrame(frame_view);
+
+  // Compare peak and RMS levels.
+  EXPECT_LT(levels_and_vad_prob.speech_rms_dbfs,
+            levels_and_vad_prob.speech_peak_dbfs);
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_processing/audio_processing_impl.cc b/modules/audio_processing/audio_processing_impl.cc
index 3764647..2937c06 100644
--- a/modules/audio_processing/audio_processing_impl.cc
+++ b/modules/audio_processing/audio_processing_impl.cc
@@ -259,6 +259,7 @@
   std::unique_ptr<CustomProcessing> render_pre_processor;
   std::unique_ptr<GainApplier> pre_amplifier;
   std::unique_ptr<CustomAudioAnalyzer> capture_analyzer;
+  std::unique_ptr<LevelEstimatorImpl> output_level_estimator;
 };
 
 AudioProcessingBuilder::AudioProcessingBuilder() = default;
@@ -673,6 +674,13 @@
                    << config_.gain_controller2.enabled;
   RTC_LOG(LS_INFO) << "Pre-amplifier activated: "
                    << config_.pre_amplifier.enabled;
+
+  if (config_.level_estimation.enabled &&
+      !private_submodules_->output_level_estimator) {
+    private_submodules_->output_level_estimator.reset(
+        new LevelEstimatorImpl(&crit_capture_));
+    private_submodules_->output_level_estimator->Enable(true);
+  }
 }
 
 void AudioProcessingImpl::SetExtraOptions(const webrtc::Config& config) {
@@ -1336,6 +1344,13 @@
 
   // The level estimator operates on the recombined data.
   public_submodules_->level_estimator->ProcessStream(capture_buffer);
+  if (config_.level_estimation.enabled) {
+    private_submodules_->output_level_estimator->ProcessStream(capture_buffer);
+    capture_.stats.output_rms_dbfs =
+        private_submodules_->output_level_estimator->RMS();
+  } else {
+    capture_.stats.output_rms_dbfs = absl::nullopt;
+  }
 
   capture_output_rms_.Analyze(rtc::ArrayView<const int16_t>(
       capture_buffer->channels_const()[0],
@@ -1587,49 +1602,50 @@
 
 AudioProcessingStats AudioProcessingImpl::GetStatistics(
     bool has_remote_tracks) const {
-  AudioProcessingStats stats;
-  if (has_remote_tracks) {
-    EchoCancellationImpl::Metrics metrics;
-    rtc::CritScope cs_capture(&crit_capture_);
-    if (private_submodules_->echo_controller) {
-      auto ec_metrics = private_submodules_->echo_controller->GetMetrics();
-      stats.echo_return_loss = ec_metrics.echo_return_loss;
+  rtc::CritScope cs_capture(&crit_capture_);
+  if (!has_remote_tracks) {
+    return capture_.stats;
+  }
+  AudioProcessingStats stats = capture_.stats;
+  EchoCancellationImpl::Metrics metrics;
+  if (private_submodules_->echo_controller) {
+    auto ec_metrics = private_submodules_->echo_controller->GetMetrics();
+    stats.echo_return_loss = ec_metrics.echo_return_loss;
+    stats.echo_return_loss_enhancement =
+        ec_metrics.echo_return_loss_enhancement;
+    stats.delay_ms = ec_metrics.delay_ms;
+  } else if (private_submodules_->echo_cancellation->GetMetrics(&metrics) ==
+             Error::kNoError) {
+    if (metrics.divergent_filter_fraction != -1.0f) {
+      stats.divergent_filter_fraction =
+          absl::optional<double>(metrics.divergent_filter_fraction);
+    }
+    if (metrics.echo_return_loss.instant != -100) {
+      stats.echo_return_loss =
+          absl::optional<double>(metrics.echo_return_loss.instant);
+    }
+    if (metrics.echo_return_loss_enhancement.instant != -100) {
       stats.echo_return_loss_enhancement =
-          ec_metrics.echo_return_loss_enhancement;
-      stats.delay_ms = ec_metrics.delay_ms;
-    } else if (private_submodules_->echo_cancellation->GetMetrics(&metrics) ==
-               Error::kNoError) {
-      if (metrics.divergent_filter_fraction != -1.0f) {
-        stats.divergent_filter_fraction =
-            absl::optional<double>(metrics.divergent_filter_fraction);