Sync to webrtc ToT

Update to the latest lkgr commit
https://webrtc.googlesource.com/src/+/0cc11b4b947e0ceae14e717aa25ceffc480529a3

This pulls in recent important AEC3 changes:

- https://webrtc-review.googlesource.com/c/src/+/112136
- https://webrtc-review.googlesource.com/c/src/+/111602
- https://webrtc-review.googlesource.com/c/src/+/109400

Note that this change modifies some apm/aec config keys. Existing users should
update their configurations separately.
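
For reference, a minimal sketch (not part of this CL) of how an existing C++
user might adjust their settings for the keys touched here. Field names come
from the diff below; the values are simply the new defaults or illustrative
placeholders, not recommendations.

  #include "api/audio/echo_canceller3_config.h"

  webrtc::EchoCanceller3Config MakeUpdatedAec3Config() {
    webrtc::EchoCanceller3Config cfg;
    // New key (JSON: erle.num_sections); the validation code in
    // echo_canceller3_config.cc limits it to [1, filter.main.length_blocks].
    cfg.erle.num_sections = 1;
    // Allowed range widened from [0, 1] to [-1, 1]; 0.f is only a placeholder.
    cfg.ep_strength.default_len = 0.f;
    // Defaults flipped from true to false in this sync.
    cfg.echo_audibility.use_stationary_properties = false;
    cfg.echo_audibility.use_stationarity_properties_at_init = false;
    return cfg;
  }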

BUG=None
TEST=emerge-nocturne webrtc-apm

Change-Id: Ib5517526db1a01dffa673d8ab0dc828ceca73535
Reviewed-on: https://chromium-review.googlesource.com/1354142
Commit-Ready: ChromeOS CL Exonerator Bot <chromiumos-cl-exonerator@appspot.gserviceaccount.com>
Tested-by: Hsinyu Chao <hychao@chromium.org>
Reviewed-by: Cheng-Yi Chiang <cychiang@chromium.org>
Reviewed-by: Per Åhgren <peah@chromium.org>
diff --git a/api/audio/echo_canceller3_config.cc b/api/audio/echo_canceller3_config.cc
index 29d0b9a..3eb2a8d 100644
--- a/api/audio/echo_canceller3_config.cc
+++ b/api/audio/echo_canceller3_config.cc
@@ -148,11 +148,12 @@
     c->erle.min = std::min(c->erle.max_l, c->erle.max_h);
     res = false;
   }
+  res = res & Limit(&c->erle.num_sections, 1, c->filter.main.length_blocks);
 
   res = res & Limit(&c->ep_strength.lf, 0.f, 1000000.f);
   res = res & Limit(&c->ep_strength.mf, 0.f, 1000000.f);
   res = res & Limit(&c->ep_strength.hf, 0.f, 1000000.f);
-  res = res & Limit(&c->ep_strength.default_len, 0.f, 1.f);
+  res = res & Limit(&c->ep_strength.default_len, -1.f, 1.f);
 
   res =
       res & Limit(&c->echo_audibility.low_render_limit, 0.f, 32768.f * 32768.f);
@@ -243,6 +244,12 @@
 
   res = res & Limit(&c->suppressor.floor_first_increase, 0.f, 1000000.f);
 
+  if (c->delay.delay_headroom_blocks >
+      c->filter.main_initial.length_blocks - 1) {
+    c->delay.delay_headroom_blocks = c->filter.main_initial.length_blocks - 1;
+    res = false;
+  }
+
   return res;
 }
 }  // namespace webrtc
diff --git a/api/audio/echo_canceller3_config.h b/api/audio/echo_canceller3_config.h
index 251f282..ffe17f2 100644
--- a/api/audio/echo_canceller3_config.h
+++ b/api/audio/echo_canceller3_config.h
@@ -87,6 +87,7 @@
     float max_l = 4.f;
     float max_h = 1.5f;
     bool onset_detection = true;
+    size_t num_sections = 1;
   } erle;
 
   struct EpStrength {
@@ -106,8 +107,8 @@
     float audibility_threshold_lf = 10;
     float audibility_threshold_mf = 10;
     float audibility_threshold_hf = 10;
-    bool use_stationary_properties = true;
-    bool use_stationarity_properties_at_init = true;
+    bool use_stationary_properties = false;
+    bool use_stationarity_properties_at_init = false;
   } echo_audibility;
 
   struct RenderLevels {
@@ -181,8 +182,8 @@
                                    0.25f);
 
     struct DominantNearendDetection {
-      float enr_threshold = 4.f;
-      float enr_exit_threshold = .1f;
+      float enr_threshold = .25f;
+      float enr_exit_threshold = 10.f;
       float snr_threshold = 30.f;
       int hold_duration = 50;
       int trigger_threshold = 12;
diff --git a/api/audio/echo_canceller3_config_json.cc b/api/audio/echo_canceller3_config_json.cc
index d039c8b..01a831c 100644
--- a/api/audio/echo_canceller3_config_json.cc
+++ b/api/audio/echo_canceller3_config_json.cc
@@ -197,6 +197,7 @@
     ReadParam(section, "max_l", &cfg.erle.max_l);
     ReadParam(section, "max_h", &cfg.erle.max_h);
     ReadParam(section, "onset_detection", &cfg.erle.onset_detection);
+    ReadParam(section, "num_sections", &cfg.erle.num_sections);
   }
 
   if (rtc::GetValueFromJsonObject(aec3_root, "ep_strength", &section)) {
@@ -425,7 +426,8 @@
   ost << "\"max_l\": " << config.erle.max_l << ",";
   ost << "\"max_h\": " << config.erle.max_h << ",";
   ost << "\"onset_detection\": "
-      << (config.erle.onset_detection ? "true" : "false");
+      << (config.erle.onset_detection ? "true" : "false") << ",";
+  ost << "\"num_sections\": " << config.erle.num_sections;
   ost << "},";
 
   ost << "\"ep_strength\": {";
diff --git a/api/audio_options.cc b/api/audio_options.cc
index d464118..e33214b 100644
--- a/api/audio_options.cc
+++ b/api/audio_options.cc
@@ -49,6 +49,8 @@
           change.audio_jitter_buffer_max_packets);
   SetFrom(&audio_jitter_buffer_fast_accelerate,
           change.audio_jitter_buffer_fast_accelerate);
+  SetFrom(&audio_jitter_buffer_min_delay_ms,
+          change.audio_jitter_buffer_min_delay_ms);
   SetFrom(&typing_detection, change.typing_detection);
   SetFrom(&experimental_agc, change.experimental_agc);
   SetFrom(&extended_filter_aec, change.extended_filter_aec);
@@ -76,6 +78,8 @@
          audio_jitter_buffer_max_packets == o.audio_jitter_buffer_max_packets &&
          audio_jitter_buffer_fast_accelerate ==
              o.audio_jitter_buffer_fast_accelerate &&
+         audio_jitter_buffer_min_delay_ms ==
+             o.audio_jitter_buffer_min_delay_ms &&
          typing_detection == o.typing_detection &&
          experimental_agc == o.experimental_agc &&
          extended_filter_aec == o.extended_filter_aec &&
@@ -107,6 +111,8 @@
                 audio_jitter_buffer_max_packets);
   ToStringIfSet(&result, "audio_jitter_buffer_fast_accelerate",
                 audio_jitter_buffer_fast_accelerate);
+  ToStringIfSet(&result, "audio_jitter_buffer_min_delay_ms",
+                audio_jitter_buffer_min_delay_ms);
   ToStringIfSet(&result, "typing", typing_detection);
   ToStringIfSet(&result, "experimental_agc", experimental_agc);
   ToStringIfSet(&result, "extended_filter_aec", extended_filter_aec);
diff --git a/api/audio_options.h b/api/audio_options.h
index 8ae8319..c2d1f44 100644
--- a/api/audio_options.h
+++ b/api/audio_options.h
@@ -54,6 +54,8 @@
   absl::optional<int> audio_jitter_buffer_max_packets;
   // Audio receiver jitter buffer (NetEq) fast accelerate mode.
   absl::optional<bool> audio_jitter_buffer_fast_accelerate;
+  // Audio receiver jitter buffer (NetEq) minimum target delay in milliseconds.
+  absl::optional<int> audio_jitter_buffer_min_delay_ms;
   // Audio processing to detect typing.
   absl::optional<bool> typing_detection;
   absl::optional<bool> experimental_agc;
diff --git a/api/create_peerconnection_factory.cc b/api/create_peerconnection_factory.cc
new file mode 100644
index 0000000..1a6d086
--- /dev/null
+++ b/api/create_peerconnection_factory.cc
@@ -0,0 +1,182 @@
+/*
+ *  Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "api/create_peerconnection_factory.h"
+
+#include <memory>
+#include <utility>
+
+#include "api/call/callfactoryinterface.h"
+#include "api/peerconnectioninterface.h"
+#include "api/video_codecs/video_decoder_factory.h"
+#include "api/video_codecs/video_encoder_factory.h"
+#include "logging/rtc_event_log/rtc_event_log_factory.h"
+#include "logging/rtc_event_log/rtc_event_log_factory_interface.h"
+#include "media/engine/webrtcmediaengine.h"
+#include "modules/audio_device/include/audio_device.h"
+#include "modules/audio_processing/include/audio_processing.h"
+#include "rtc_base/bind.h"
+#include "rtc_base/scoped_ref_ptr.h"
+#include "rtc_base/thread.h"
+
+namespace webrtc {
+
+#if defined(USE_BUILTIN_SW_CODECS)
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory) {
+  return CreatePeerConnectionFactoryWithAudioMixer(
+      nullptr /*network_thread*/, nullptr /*worker_thread*/,
+      nullptr /*signaling_thread*/, nullptr /*default_adm*/,
+      audio_encoder_factory, audio_decoder_factory,
+      nullptr /*video_encoder_factory*/, nullptr /*video_decoder_factory*/,
+      nullptr /*audio_mixer*/);
+}
+
+// Note: all the other CreatePeerConnectionFactory variants just end up calling
+// this, ultimately.
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing) {
+  rtc::scoped_refptr<AudioProcessing> audio_processing_use = audio_processing;
+  if (!audio_processing_use) {
+    audio_processing_use = AudioProcessingBuilder().Create();
+  }
+
+  std::unique_ptr<cricket::MediaEngineInterface> media_engine(
+      cricket::WebRtcMediaEngineFactory::Create(
+          default_adm, audio_encoder_factory, audio_decoder_factory,
+          video_encoder_factory, video_decoder_factory, audio_mixer,
+          audio_processing_use));
+
+  std::unique_ptr<CallFactoryInterface> call_factory = CreateCallFactory();
+
+  std::unique_ptr<RtcEventLogFactoryInterface> event_log_factory =
+      CreateRtcEventLogFactory();
+
+  return CreateModularPeerConnectionFactory(
+      network_thread, worker_thread, signaling_thread, std::move(media_engine),
+      std::move(call_factory), std::move(event_log_factory));
+}
+
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing,
+    std::unique_ptr<FecControllerFactoryInterface> fec_controller_factory,
+    std::unique_ptr<NetworkControllerFactoryInterface>
+        network_controller_factory) {
+  rtc::scoped_refptr<AudioProcessing> audio_processing_use = audio_processing;
+  if (!audio_processing_use) {
+    audio_processing_use = AudioProcessingBuilder().Create();
+  }
+
+  std::unique_ptr<cricket::MediaEngineInterface> media_engine(
+      cricket::WebRtcMediaEngineFactory::Create(
+          default_adm, audio_encoder_factory, audio_decoder_factory,
+          video_encoder_factory, video_decoder_factory, audio_mixer,
+          audio_processing_use));
+
+  std::unique_ptr<CallFactoryInterface> call_factory = CreateCallFactory();
+
+  std::unique_ptr<RtcEventLogFactoryInterface> event_log_factory =
+      CreateRtcEventLogFactory();
+
+  return CreateModularPeerConnectionFactory(
+      network_thread, worker_thread, signaling_thread, std::move(media_engine),
+      std::move(call_factory), std::move(event_log_factory),
+      std::move(fec_controller_factory), std::move(network_controller_factory));
+}
+#endif
+
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    rtc::scoped_refptr<AudioDeviceModule> default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    std::unique_ptr<VideoEncoderFactory> video_encoder_factory,
+    std::unique_ptr<VideoDecoderFactory> video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing) {
+  if (!audio_processing)
+    audio_processing = AudioProcessingBuilder().Create();
+
+  std::unique_ptr<cricket::MediaEngineInterface> media_engine =
+      cricket::WebRtcMediaEngineFactory::Create(
+          default_adm, audio_encoder_factory, audio_decoder_factory,
+          std::move(video_encoder_factory), std::move(video_decoder_factory),
+          audio_mixer, audio_processing);
+
+  std::unique_ptr<CallFactoryInterface> call_factory = CreateCallFactory();
+
+  std::unique_ptr<RtcEventLogFactoryInterface> event_log_factory =
+      CreateRtcEventLogFactory();
+  PeerConnectionFactoryDependencies dependencies;
+  dependencies.network_thread = network_thread;
+  dependencies.worker_thread = worker_thread;
+  dependencies.signaling_thread = signaling_thread;
+  dependencies.media_engine = std::move(media_engine);
+  dependencies.call_factory = std::move(call_factory);
+  dependencies.event_log_factory = std::move(event_log_factory);
+  return CreateModularPeerConnectionFactory(std::move(dependencies));
+}
+
+#if defined(USE_BUILTIN_SW_CODECS)
+rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactoryWithAudioMixer(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer) {
+  return CreatePeerConnectionFactory(
+      network_thread, worker_thread, signaling_thread, default_adm,
+      audio_encoder_factory, audio_decoder_factory, video_encoder_factory,
+      video_decoder_factory, audio_mixer, nullptr);
+}
+
+rtc::scoped_refptr<PeerConnectionFactoryInterface> CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory) {
+  return CreatePeerConnectionFactoryWithAudioMixer(
+      network_thread, worker_thread, signaling_thread, default_adm,
+      audio_encoder_factory, audio_decoder_factory, video_encoder_factory,
+      video_decoder_factory, nullptr);
+}
+#endif
+
+}  // namespace webrtc
diff --git a/api/create_peerconnection_factory.h b/api/create_peerconnection_factory.h
new file mode 100644
index 0000000..baa50c7
--- /dev/null
+++ b/api/create_peerconnection_factory.h
@@ -0,0 +1,179 @@
+/*
+ *  Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_CREATE_PEERCONNECTION_FACTORY_H_
+#define API_CREATE_PEERCONNECTION_FACTORY_H_
+
+#include <memory>
+
+#include "api/audio/audio_mixer.h"
+#include "api/audio_codecs/audio_decoder_factory.h"
+#include "api/audio_codecs/audio_encoder_factory.h"
+#include "api/fec_controller.h"
+#include "api/peerconnectioninterface.h"
+#include "api/transport/network_control.h"
+#include "rtc_base/scoped_ref_ptr.h"
+
+namespace rtc {
+// TODO(bugs.webrtc.org/9987): Move rtc::Thread to api/ or expose a better
+// type. At the moment, rtc::Thread is not part of api/ so it cannot be
+// included in order to avoid to leak internal types.
+class Thread;
+}  // namespace rtc
+
+namespace cricket {
+class WebRtcVideoDecoderFactory;
+class WebRtcVideoEncoderFactory;
+}  // namespace cricket
+
+namespace webrtc {
+
+class AudioDeviceModule;
+class AudioProcessing;
+
+#if defined(USE_BUILTIN_SW_CODECS)
+// Create a new instance of PeerConnectionFactoryInterface.
+//
+// This method relies on the thread it's called on as the "signaling thread"
+// for the PeerConnectionFactory it creates.
+//
+// As such, if the current thread is not already running an rtc::Thread message
+// loop, an application using this method must eventually either call
+// rtc::Thread::Current()->Run(), or call
+// rtc::Thread::Current()->ProcessMessages() within the application's own
+// message loop.
+RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory);
+
+// Create a new instance of PeerConnectionFactoryInterface.
+//
+// |network_thread|, |worker_thread| and |signaling_thread| are
+// the only mandatory parameters.
+//
+// If non-null, a reference is added to |default_adm|, and ownership of
+// |video_encoder_factory| and |video_decoder_factory| is transferred to the
+// returned factory.
+// TODO(deadbeef): Use rtc::scoped_refptr<> and std::unique_ptr<> to make this
+// ownership transfer and ref counting more obvious.
+RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory);
+
+// Create a new instance of PeerConnectionFactoryInterface with optional
+// external audio mixed and audio processing modules.
+//
+// If |audio_mixer| is null, an internal audio mixer will be created and used.
+// If |audio_processing| is null, an internal audio processing module will be
+// created and used.
+RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing);
+
+// Create a new instance of PeerConnectionFactoryInterface with optional
+// external audio mixer, audio processing, and fec controller modules.
+//
+// If |audio_mixer| is null, an internal audio mixer will be created and used.
+// If |audio_processing| is null, an internal audio processing module will be
+// created and used.
+// If |fec_controller_factory| is null, an internal fec controller module will
+// be created and used.
+// If |network_controller_factory| is provided, it will be used if enabled via
+// field trial.
+RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing,
+    std::unique_ptr<FecControllerFactoryInterface> fec_controller_factory,
+    std::unique_ptr<NetworkControllerFactoryInterface>
+        network_controller_factory = nullptr);
+#endif  // defined(USE_BUILTIN_SW_CODECS)
+
+// Create a new instance of PeerConnectionFactoryInterface with optional video
+// codec factories. These video factories represents all video codecs, i.e. no
+// extra internal video codecs will be added.
+// When building WebRTC with rtc_use_builtin_sw_codecs = false, this is the
+// only available CreatePeerConnectionFactory overload.
+RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    rtc::scoped_refptr<AudioDeviceModule> default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    std::unique_ptr<VideoEncoderFactory> video_encoder_factory,
+    std::unique_ptr<VideoDecoderFactory> video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer,
+    rtc::scoped_refptr<AudioProcessing> audio_processing);
+
+#if defined(USE_BUILTIN_SW_CODECS)
+// Create a new instance of PeerConnectionFactoryInterface with external audio
+// mixer.
+//
+// If |audio_mixer| is null, an internal audio mixer will be created and used.
+RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactoryWithAudioMixer(
+    rtc::Thread* network_thread,
+    rtc::Thread* worker_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
+    rtc::scoped_refptr<AudioMixer> audio_mixer);
+
+// Create a new instance of PeerConnectionFactoryInterface.
+// Same thread is used as worker and network thread.
+RTC_EXPORT inline rtc::scoped_refptr<PeerConnectionFactoryInterface>
+CreatePeerConnectionFactory(
+    rtc::Thread* worker_and_network_thread,
+    rtc::Thread* signaling_thread,
+    AudioDeviceModule* default_adm,
+    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
+    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
+    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
+    cricket::WebRtcVideoDecoderFactory* video_decoder_factory) {
+  return CreatePeerConnectionFactory(
+      worker_and_network_thread, worker_and_network_thread, signaling_thread,
+      default_adm, audio_encoder_factory, audio_decoder_factory,
+      video_encoder_factory, video_decoder_factory);
+}
+#endif  // defined(USE_BUILTIN_SW_CODECS)
+
+}  // namespace webrtc
+
+#endif  // API_CREATE_PEERCONNECTION_FACTORY_H_
diff --git a/api/media_transport_interface.cc b/api/media_transport_interface.cc
index 039a4a1..ef223aa 100644
--- a/api/media_transport_interface.cc
+++ b/api/media_transport_interface.cc
@@ -114,9 +114,6 @@
 void MediaTransportInterface::RemoveTargetTransferRateObserver(
     webrtc::TargetTransferRateObserver* observer) {}
 
-void MediaTransportInterface::SetTargetTransferRateObserver(
-    webrtc::TargetTransferRateObserver* observer) {}
-
 void MediaTransportInterface::AddTargetTransferRateObserver(
     webrtc::TargetTransferRateObserver* observer) {}
 
diff --git a/api/media_transport_interface.h b/api/media_transport_interface.h
index 7570160..b10dd63 100644
--- a/api/media_transport_interface.h
+++ b/api/media_transport_interface.h
@@ -27,7 +27,6 @@
 #include "api/array_view.h"
 #include "api/rtcerror.h"
 #include "api/video/encoded_image.h"
-#include "common_types.h"  // NOLINT(build/include)
 #include "rtc_base/copyonwritebuffer.h"
 #include "rtc_base/networkroute.h"
 
@@ -336,15 +335,6 @@
   // pass a nullptr.
   virtual void SetReceiveVideoSink(MediaTransportVideoSinkInterface* sink) = 0;
 
-  // Sets a target bitrate observer. Before media transport is destructed
-  // the observer must be unregistered (set to nullptr).
-  // A newly registered observer will be called back with the latest recorded
-  // target rate, if available.
-  // TODO(psla): This method will be removed, in favor of
-  // AddTargetTransferRateObserver.
-  virtual void SetTargetTransferRateObserver(
-      TargetTransferRateObserver* observer);
-
   // Adds a target bitrate observer. Before media transport is destructed
   // the observer must be unregistered (by calling
   // RemoveTargetTransferRateObserver).
diff --git a/api/mediastreaminterface.cc b/api/mediastreaminterface.cc
index e36d5cb..955e7e4 100644
--- a/api/mediastreaminterface.cc
+++ b/api/mediastreaminterface.cc
@@ -30,4 +30,8 @@
   return nullptr;
 }
 
+const cricket::AudioOptions AudioSourceInterface::options() const {
+  return {};
+}
+
 }  // namespace webrtc
diff --git a/api/mediastreaminterface.h b/api/mediastreaminterface.h
index 30f8f71..6d96766 100644
--- a/api/mediastreaminterface.h
+++ b/api/mediastreaminterface.h
@@ -23,6 +23,7 @@
 #include <vector>
 
 #include "absl/types/optional.h"
+#include "api/audio_options.h"
 #include "api/video/video_frame.h"
 #include "api/video/video_sink_interface.h"
 #include "api/video/video_source_interface.h"
@@ -207,6 +208,11 @@
   // TODO(tommi): Make pure virtual.
   virtual void AddSink(AudioTrackSinkInterface* sink) {}
   virtual void RemoveSink(AudioTrackSinkInterface* sink) {}
+
+  // Returns options for the AudioSource.
+  // (for some of the settings this approach is broken, e.g. setting
+  // audio network adaptation on the source is the wrong layer of abstraction).
+  virtual const cricket::AudioOptions options() const;
 };
 
 // Interface of the audio processor used by the audio track to collect
diff --git a/api/peerconnectioninterface.h b/api/peerconnectioninterface.h
index 80c3091..54161b8 100644
--- a/api/peerconnectioninterface.h
+++ b/api/peerconnectioninterface.h
@@ -340,6 +340,22 @@
       media_config.video.experiment_cpu_load_estimator = enable;
     }
 
+    int audio_rtcp_report_interval_ms() const {
+      return media_config.audio.rtcp_report_interval_ms;
+    }
+    void set_audio_rtcp_report_interval_ms(int audio_rtcp_report_interval_ms) {
+      media_config.audio.rtcp_report_interval_ms =
+          audio_rtcp_report_interval_ms;
+    }
+
+    int video_rtcp_report_interval_ms() const {
+      return media_config.video.rtcp_report_interval_ms;
+    }
+    void set_video_rtcp_report_interval_ms(int video_rtcp_report_interval_ms) {
+      media_config.video.rtcp_report_interval_ms =
+          video_rtcp_report_interval_ms;
+    }
+
     static const int kUndefined = -1;
     // Default maximum number of packets in the audio jitter buffer.
     static const int kAudioJitterBufferMaxPackets = 50;
@@ -434,6 +450,9 @@
     // if it falls behind.
     bool audio_jitter_buffer_fast_accelerate = false;
 
+    // The minimum delay in milliseconds for the audio jitter buffer.
+    int audio_jitter_buffer_min_delay_ms = 0;
+
     // Timeout in milliseconds before an ICE candidate pair is considered to be
     // "not receiving", after which a lower priority candidate pair may be
     // selected.
@@ -597,6 +616,14 @@
     // settings set in PeerConnectionFactory (which is deprecated).
     absl::optional<CryptoOptions> crypto_options;
 
+    // Configure if we should include the SDP attribute extmap-allow-mixed in
+    // our offer. Although we currently do support this, it's not included in
+    // our offer by default due to a previous bug that caused the SDP parser to
+    // abort parsing if this attribute was present. This is fixed in Chrome 71.
+    // TODO(webrtc:9985): Change default to true once sufficient time has
+    // passed.
+    bool offer_extmap_allow_mixed = false;
+
     //
     // Don't forget to update operator== if adding something.
     //
@@ -1348,142 +1375,6 @@
   ~PeerConnectionFactoryInterface() override = default;
 };
 
-#if defined(USE_BUILTIN_SW_CODECS)
-// Create a new instance of PeerConnectionFactoryInterface.
-//
-// This method relies on the thread it's called on as the "signaling thread"
-// for the PeerConnectionFactory it creates.
-//
-// As such, if the current thread is not already running an rtc::Thread message
-// loop, an application using this method must eventually either call
-// rtc::Thread::Current()->Run(), or call
-// rtc::Thread::Current()->ProcessMessages() within the application's own
-// message loop.
-RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
-CreatePeerConnectionFactory(
-    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
-    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory);
-
-// Create a new instance of PeerConnectionFactoryInterface.
-//
-// |network_thread|, |worker_thread| and |signaling_thread| are
-// the only mandatory parameters.
-//
-// If non-null, a reference is added to |default_adm|, and ownership of
-// |video_encoder_factory| and |video_decoder_factory| is transferred to the
-// returned factory.
-// TODO(deadbeef): Use rtc::scoped_refptr<> and std::unique_ptr<> to make this
-// ownership transfer and ref counting more obvious.
-RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
-CreatePeerConnectionFactory(
-    rtc::Thread* network_thread,
-    rtc::Thread* worker_thread,
-    rtc::Thread* signaling_thread,
-    AudioDeviceModule* default_adm,
-    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
-    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
-    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
-    cricket::WebRtcVideoDecoderFactory* video_decoder_factory);
-
-// Create a new instance of PeerConnectionFactoryInterface with optional
-// external audio mixed and audio processing modules.
-//
-// If |audio_mixer| is null, an internal audio mixer will be created and used.
-// If |audio_processing| is null, an internal audio processing module will be
-// created and used.
-RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
-CreatePeerConnectionFactory(
-    rtc::Thread* network_thread,
-    rtc::Thread* worker_thread,
-    rtc::Thread* signaling_thread,
-    AudioDeviceModule* default_adm,
-    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
-    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
-    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
-    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
-    rtc::scoped_refptr<AudioMixer> audio_mixer,
-    rtc::scoped_refptr<AudioProcessing> audio_processing);
-
-// Create a new instance of PeerConnectionFactoryInterface with optional
-// external audio mixer, audio processing, and fec controller modules.
-//
-// If |audio_mixer| is null, an internal audio mixer will be created and used.
-// If |audio_processing| is null, an internal audio processing module will be
-// created and used.
-// If |fec_controller_factory| is null, an internal fec controller module will
-// be created and used.
-// If |network_controller_factory| is provided, it will be used if enabled via
-// field trial.
-RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
-CreatePeerConnectionFactory(
-    rtc::Thread* network_thread,
-    rtc::Thread* worker_thread,
-    rtc::Thread* signaling_thread,
-    AudioDeviceModule* default_adm,
-    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
-    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
-    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
-    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
-    rtc::scoped_refptr<AudioMixer> audio_mixer,
-    rtc::scoped_refptr<AudioProcessing> audio_processing,
-    std::unique_ptr<FecControllerFactoryInterface> fec_controller_factory,
-    std::unique_ptr<NetworkControllerFactoryInterface>
-        network_controller_factory = nullptr);
-#endif
-
-// Create a new instance of PeerConnectionFactoryInterface with optional video
-// codec factories. These video factories represents all video codecs, i.e. no
-// extra internal video codecs will be added.
-// When building WebRTC with rtc_use_builtin_sw_codecs = false, this is the
-// only available CreatePeerConnectionFactory overload.
-RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
-CreatePeerConnectionFactory(
-    rtc::Thread* network_thread,
-    rtc::Thread* worker_thread,
-    rtc::Thread* signaling_thread,
-    rtc::scoped_refptr<AudioDeviceModule> default_adm,
-    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
-    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
-    std::unique_ptr<VideoEncoderFactory> video_encoder_factory,
-    std::unique_ptr<VideoDecoderFactory> video_decoder_factory,
-    rtc::scoped_refptr<AudioMixer> audio_mixer,
-    rtc::scoped_refptr<AudioProcessing> audio_processing);
-
-#if defined(USE_BUILTIN_SW_CODECS)
-// Create a new instance of PeerConnectionFactoryInterface with external audio
-// mixer.
-//
-// If |audio_mixer| is null, an internal audio mixer will be created and used.
-RTC_EXPORT rtc::scoped_refptr<PeerConnectionFactoryInterface>
-CreatePeerConnectionFactoryWithAudioMixer(
-    rtc::Thread* network_thread,
-    rtc::Thread* worker_thread,
-    rtc::Thread* signaling_thread,
-    AudioDeviceModule* default_adm,
-    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
-    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
-    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
-    cricket::WebRtcVideoDecoderFactory* video_decoder_factory,
-    rtc::scoped_refptr<AudioMixer> audio_mixer);
-
-// Create a new instance of PeerConnectionFactoryInterface.
-// Same thread is used as worker and network thread.
-RTC_EXPORT inline rtc::scoped_refptr<PeerConnectionFactoryInterface>
-CreatePeerConnectionFactory(
-    rtc::Thread* worker_and_network_thread,
-    rtc::Thread* signaling_thread,
-    AudioDeviceModule* default_adm,
-    rtc::scoped_refptr<AudioEncoderFactory> audio_encoder_factory,
-    rtc::scoped_refptr<AudioDecoderFactory> audio_decoder_factory,
-    cricket::WebRtcVideoEncoderFactory* video_encoder_factory,
-    cricket::WebRtcVideoDecoderFactory* video_decoder_factory) {
-  return CreatePeerConnectionFactory(
-      worker_and_network_thread, worker_and_network_thread, signaling_thread,
-      default_adm, audio_encoder_factory, audio_decoder_factory,
-      video_encoder_factory, video_decoder_factory);
-}
-#endif
-
 // This is a lower-level version of the CreatePeerConnectionFactory functions
 // above. It's implemented in the "peerconnection" build target, whereas the
 // above methods are only implemented in the broader "libjingle_peerconnection"
diff --git a/api/proxy.cc b/api/proxy.cc
index 01e6be5..e668285 100644
--- a/api/proxy.cc
+++ b/api/proxy.cc
@@ -14,7 +14,7 @@
 namespace internal {
 
 SynchronousMethodCall::SynchronousMethodCall(rtc::MessageHandler* proxy)
-    : e_(), proxy_(proxy) {}
+    : proxy_(proxy) {}
 
 SynchronousMethodCall::~SynchronousMethodCall() = default;
 
@@ -23,15 +23,14 @@
   if (t->IsCurrent()) {
     proxy_->OnMessage(nullptr);
   } else {
-    e_ = absl::make_unique<rtc::Event>();
     t->Post(posted_from, this, 0);
-    e_->Wait(rtc::Event::kForever);
+    e_.Wait(rtc::Event::kForever);
   }
 }
 
 void SynchronousMethodCall::OnMessage(rtc::Message*) {
   proxy_->OnMessage(nullptr);
-  e_->Set();
+  e_.Set();
 }
 
 }  // namespace internal
diff --git a/api/proxy.h b/api/proxy.h
index c8962ef..9916051 100644
--- a/api/proxy.h
+++ b/api/proxy.h
@@ -143,7 +143,7 @@
  private:
   void OnMessage(rtc::Message*) override;
 
-  std::unique_ptr<rtc::Event> e_;
+  rtc::Event e_;
   rtc::MessageHandler* proxy_;
 };
 
diff --git a/api/rtp_headers.h b/api/rtp_headers.h
index eff6223..c766899 100644
--- a/api/rtp_headers.h
+++ b/api/rtp_headers.h
@@ -15,7 +15,9 @@
 #include <stdint.h>
 #include <string.h>
 
+#include "absl/types/optional.h"
 #include "api/array_view.h"
+#include "api/video/color_space.h"
 #include "api/video/video_content_type.h"
 #include "api/video/video_frame_marking.h"
 #include "api/video/video_rotation.h"
@@ -126,6 +128,8 @@
   // For identifying the media section used to interpret this RTP packet. See
   // https://tools.ietf.org/html/draft-ietf-mmusic-sdp-bundle-negotiation-38
   Mid mid;
+
+  absl::optional<ColorSpace> color_space;
 };
 
 struct RTPHeader {
diff --git a/api/scoped_refptr.h b/api/scoped_refptr.h
new file mode 100644
index 0000000..0993e03
--- /dev/null
+++ b/api/scoped_refptr.h
@@ -0,0 +1,162 @@
+/*
+ *  Copyright 2011 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+// Originally these classes are from Chromium.
+// http://src.chromium.org/viewvc/chrome/trunk/src/base/memory/ref_counted.h?view=markup
+
+//
+// A smart pointer class for reference counted objects.  Use this class instead
+// of calling AddRef and Release manually on a reference counted object to
+// avoid common memory leaks caused by forgetting to Release an object
+// reference.  Sample usage:
+//
+//   class MyFoo : public RefCounted<MyFoo> {
+//    ...
+//   };
+//
+//   void some_function() {
+//     scoped_refptr<MyFoo> foo = new MyFoo();
+//     foo->Method(param);
+//     // |foo| is released when this function returns
+//   }
+//
+//   void some_other_function() {
+//     scoped_refptr<MyFoo> foo = new MyFoo();
+//     ...
+//     foo = nullptr;  // explicitly releases |foo|
+//     ...
+//     if (foo)
+//       foo->Method(param);
+//   }
+//
+// The above examples show how scoped_refptr<T> acts like a pointer to T.
+// Given two scoped_refptr<T> classes, it is also possible to exchange
+// references between the two objects, like so:
+//
+//   {
+//     scoped_refptr<MyFoo> a = new MyFoo();
+//     scoped_refptr<MyFoo> b;
+//
+//     b.swap(a);
+//     // now, |b| references the MyFoo object, and |a| references null.
+//   }
+//
+// To make both |a| and |b| in the above example reference the same MyFoo
+// object, simply use the assignment operator:
+//
+//   {
+//     scoped_refptr<MyFoo> a = new MyFoo();
+//     scoped_refptr<MyFoo> b;
+//
+//     b = a;
+//     // now, |a| and |b| each own a reference to the same MyFoo object.
+//   }
+//
+
+#ifndef API_SCOPED_REFPTR_H_
+#define API_SCOPED_REFPTR_H_
+
+#include <memory>
+#include <utility>
+
+namespace rtc {
+
+template <class T>
+class scoped_refptr {
+ public:
+  scoped_refptr() : ptr_(nullptr) {}
+
+  scoped_refptr(T* p) : ptr_(p) {  // NOLINT(runtime/explicit)
+    if (ptr_)
+      ptr_->AddRef();
+  }
+
+  scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) {
+    if (ptr_)
+      ptr_->AddRef();
+  }
+
+  template <typename U>
+  scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
+    if (ptr_)
+      ptr_->AddRef();
+  }
+
+  // Move constructors.
+  scoped_refptr(scoped_refptr<T>&& r) : ptr_(r.release()) {}
+
+  template <typename U>
+  scoped_refptr(scoped_refptr<U>&& r) : ptr_(r.release()) {}
+
+  ~scoped_refptr() {
+    if (ptr_)
+      ptr_->Release();
+  }
+
+  T* get() const { return ptr_; }
+  operator T*() const { return ptr_; }
+  T* operator->() const { return ptr_; }
+
+  // Returns the (possibly null) raw pointer, and makes the scoped_refptr hold a
+  // null pointer, all without touching the reference count of the underlying
+  // pointed-to object. The object is still reference counted, and the caller of
+  // release() is now the proud owner of one reference, so it is responsible for
+  // calling Release() once on the object when no longer using it.
+  T* release() {
+    T* retVal = ptr_;
+    ptr_ = nullptr;
+    return retVal;
+  }
+
+  scoped_refptr<T>& operator=(T* p) {
+    // AddRef first so that self assignment should work
+    if (p)
+      p->AddRef();
+    if (ptr_)
+      ptr_->Release();
+    ptr_ = p;
+    return *this;
+  }
+
+  scoped_refptr<T>& operator=(const scoped_refptr<T>& r) {
+    return *this = r.ptr_;
+  }
+
+  template <typename U>
+  scoped_refptr<T>& operator=(const scoped_refptr<U>& r) {
+    return *this = r.get();
+  }
+
+  scoped_refptr<T>& operator=(scoped_refptr<T>&& r) {
+    scoped_refptr<T>(std::move(r)).swap(*this);
+    return *this;
+  }
+
+  template <typename U>
+  scoped_refptr<T>& operator=(scoped_refptr<U>&& r) {
+    scoped_refptr<T>(std::move(r)).swap(*this);
+    return *this;
+  }
+
+  void swap(T** pp) {
+    T* p = ptr_;
+    ptr_ = *pp;
+    *pp = p;
+  }
+
+  void swap(scoped_refptr<T>& r) { swap(&r.ptr_); }
+
+ protected:
+  T* ptr_;
+};
+
+}  // namespace rtc
+
+#endif  // API_SCOPED_REFPTR_H_
diff --git a/api/units/data_rate.cc b/api/units/data_rate.cc
index 9170627..d72d958 100644
--- a/api/units/data_rate.cc
+++ b/api/units/data_rate.cc
@@ -14,7 +14,7 @@
 
 namespace webrtc {
 
-std::string ToString(const DataRate& value) {
+std::string ToString(DataRate value) {
   char buf[64];
   rtc::SimpleStringBuilder sb(buf);
   if (value.IsInfinite()) {
diff --git a/api/units/data_rate.h b/api/units/data_rate.h
index 28efcd3..7119284 100644
--- a/api/units/data_rate.h
+++ b/api/units/data_rate.h
@@ -15,9 +15,6 @@
 #include <ostream>  // no-presubmit-check TODO(webrtc:8982)
 #endif              // UNIT_TEST
 
-#include <stdint.h>
-#include <algorithm>
-#include <cmath>
 #include <limits>
 #include <string>
 #include <type_traits>
@@ -25,12 +22,10 @@
 #include "api/units/data_size.h"
 #include "api/units/time_delta.h"
 #include "rtc_base/checks.h"
-#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/units/unit_base.h"
 
 namespace webrtc {
 namespace data_rate_impl {
-constexpr int64_t kPlusInfinityVal = std::numeric_limits<int64_t>::max();
-
 inline int64_t Microbits(const DataSize& size) {
   constexpr int64_t kMaxBeforeConversion =
       std::numeric_limits<int64_t>::max() / 8000000;
@@ -43,184 +38,64 @@
 // DataRate is a class that represents a given data rate. This can be used to
 // represent bandwidth, encoding bitrate, etc. The internal storage is bits per
 // second (bps).
-class DataRate {
+class DataRate final : public rtc_units_impl::RelativeUnit<DataRate> {
  public:
   DataRate() = delete;
-  static constexpr DataRate Zero() { return DataRate(0); }
-  static constexpr DataRate Infinity() {
-    return DataRate(data_rate_impl::kPlusInfinityVal);
-  }
+  static constexpr DataRate Infinity() { return PlusInfinity(); }
   template <int64_t bps>
   static constexpr DataRate BitsPerSec() {
-    static_assert(bps >= 0, "");
-    static_assert(bps < data_rate_impl::kPlusInfinityVal, "");
-    return DataRate(bps);
+    return FromStaticValue<bps>();
   }
   template <int64_t kbps>
   static constexpr DataRate KilobitsPerSec() {
-    static_assert(kbps >= 0, "");
-    static_assert(kbps < data_rate_impl::kPlusInfinityVal / 1000, "");
-    return DataRate(kbps * 1000);
-  }
-
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
-  static DataRate bps(T bits_per_second) {
-    RTC_DCHECK_GE(bits_per_second, 0);
-    RTC_DCHECK_LT(bits_per_second, data_rate_impl::kPlusInfinityVal);
-    return DataRate(rtc::dchecked_cast<int64_t>(bits_per_second));
-  }
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
-  static DataRate kbps(T kilobits_per_sec) {
-    RTC_DCHECK_GE(kilobits_per_sec, 0);
-    RTC_DCHECK_LT(kilobits_per_sec, data_rate_impl::kPlusInfinityVal / 1000);
-    return DataRate::bps(rtc::dchecked_cast<int64_t>(kilobits_per_sec) * 1000);
-  }
-
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static DataRate bps(T bits_per_second) {
-    if (bits_per_second == std::numeric_limits<T>::infinity()) {
-      return Infinity();
-    } else {
-      RTC_DCHECK(!std::isnan(bits_per_second));
-      RTC_DCHECK_GE(bits_per_second, 0);
-      RTC_DCHECK_LT(bits_per_second, data_rate_impl::kPlusInfinityVal);
-      return DataRate(rtc::dchecked_cast<int64_t>(bits_per_second));
-    }
-  }
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static DataRate kbps(T kilobits_per_sec) {
-    return DataRate::bps(kilobits_per_sec * 1e3);
-  }
-
-  template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type bps() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(bits_per_sec_);
-  }
-  template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type kbps() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(UnsafeKilobitsPerSec());
-  }
-
-  template <typename T>
-  typename std::enable_if<std::is_floating_point<T>::value,
-                          T>::type constexpr bps() const {
-    return IsInfinite() ? std::numeric_limits<T>::infinity() : bits_per_sec_;
+    return FromStaticFraction<kbps, 1000>();
   }
   template <typename T>
-  typename std::enable_if<std::is_floating_point<T>::value,
-                          T>::type constexpr kbps() const {
-    return bps<T>() * 1e-3;
+  static constexpr DataRate bps(T bits_per_second) {
+    return FromValue(bits_per_second);
   }
-
+  template <typename T>
+  static constexpr DataRate kbps(T kilobits_per_sec) {
+    return FromFraction<1000>(kilobits_per_sec);
+  }
+  template <typename T = int64_t>
+  constexpr T bps() const {
+    return ToValue<T>();
+  }
+  template <typename T = int64_t>
+  T kbps() const {
+    return ToFraction<1000, T>();
+  }
   constexpr int64_t bps_or(int64_t fallback_value) const {
-    return IsFinite() ? bits_per_sec_ : fallback_value;
+    return ToValueOr(fallback_value);
   }
   constexpr int64_t kbps_or(int64_t fallback_value) const {
-    return IsFinite() ? UnsafeKilobitsPerSec() : fallback_value;
-  }
-
-  constexpr bool IsZero() const { return bits_per_sec_ == 0; }
-  constexpr bool IsInfinite() const {
-    return bits_per_sec_ == data_rate_impl::kPlusInfinityVal;
-  }
-  constexpr bool IsFinite() const { return !IsInfinite(); }
-  DataRate Clamped(DataRate min_rate, DataRate max_rate) const {
-    return std::max(min_rate, std::min(*this, max_rate));
-  }
-  void Clamp(DataRate min_rate, DataRate max_rate) {
-    *this = Clamped(min_rate, max_rate);
-  }
-  DataRate operator-(const DataRate& other) const {
-    return DataRate::bps(bps() - other.bps());
-  }
-  DataRate operator+(const DataRate& other) const {
-    return DataRate::bps(bps() + other.bps());
-  }
-  DataRate& operator-=(const DataRate& other) {
-    *this = *this - other;
-    return *this;
-  }
-  DataRate& operator+=(const DataRate& other) {
-    *this = *this + other;
-    return *this;
-  }
-  constexpr double operator/(const DataRate& other) const {
-    return bps<double>() / other.bps<double>();
-  }
-  constexpr bool operator==(const DataRate& other) const {
-    return bits_per_sec_ == other.bits_per_sec_;
-  }
-  constexpr bool operator!=(const DataRate& other) const {
-    return bits_per_sec_ != other.bits_per_sec_;
-  }
-  constexpr bool operator<=(const DataRate& other) const {
-    return bits_per_sec_ <= other.bits_per_sec_;
-  }
-  constexpr bool operator>=(const DataRate& other) const {
-    return bits_per_sec_ >= other.bits_per_sec_;
-  }
-  constexpr bool operator>(const DataRate& other) const {
-    return bits_per_sec_ > other.bits_per_sec_;
-  }
-  constexpr bool operator<(const DataRate& other) const {
-    return bits_per_sec_ < other.bits_per_sec_;
+    return ToFractionOr<1000>(fallback_value);
   }
 
  private:
   // Bits per second used internally to simplify debugging by making the value
   // more recognizable.
-  explicit constexpr DataRate(int64_t bits_per_second)
-      : bits_per_sec_(bits_per_second) {}
-  constexpr int64_t UnsafeKilobitsPerSec() const {
-    return (bits_per_sec_ + 500) / 1000;
-  }
-  int64_t bits_per_sec_;
+  friend class rtc_units_impl::UnitBase<DataRate>;
+  using RelativeUnit::RelativeUnit;
+  static constexpr bool one_sided = true;
 };
 
-inline DataRate operator*(const DataRate& rate, const double& scalar) {
-  return DataRate::bps(std::round(rate.bps() * scalar));
-}
-inline DataRate operator*(const double& scalar, const DataRate& rate) {
-  return rate * scalar;
-}
-inline DataRate operator*(const DataRate& rate, const int64_t& scalar) {
-  return DataRate::bps(rate.bps() * scalar);
-}
-inline DataRate operator*(const int64_t& scalar, const DataRate& rate) {
-  return rate * scalar;
-}
-inline DataRate operator*(const DataRate& rate, const int32_t& scalar) {
-  return DataRate::bps(rate.bps() * scalar);
-}
-inline DataRate operator*(const int32_t& scalar, const DataRate& rate) {
-  return rate * scalar;
-}
-
-inline DataRate operator/(const DataSize& size, const TimeDelta& duration) {
+inline DataRate operator/(const DataSize size, const TimeDelta duration) {
   return DataRate::bps(data_rate_impl::Microbits(size) / duration.us());
 }
-inline TimeDelta operator/(const DataSize& size, const DataRate& rate) {
+inline TimeDelta operator/(const DataSize size, const DataRate rate) {
   return TimeDelta::us(data_rate_impl::Microbits(size) / rate.bps());
 }
-inline DataSize operator*(const DataRate& rate, const TimeDelta& duration) {
+inline DataSize operator*(const DataRate rate, const TimeDelta duration) {
   int64_t microbits = rate.bps() * duration.us();
   return DataSize::bytes((microbits + 4000000) / 8000000);
 }
-inline DataSize operator*(const TimeDelta& duration, const DataRate& rate) {
+inline DataSize operator*(const TimeDelta duration, const DataRate rate) {
   return rate * duration;
 }
 
-std::string ToString(const DataRate& value);
+std::string ToString(DataRate value);
 
 #ifdef UNIT_TEST
 inline std::ostream& operator<<(  // no-presubmit-check TODO(webrtc:8982)
diff --git a/api/units/data_rate_unittest.cc b/api/units/data_rate_unittest.cc
index 8e5b660..996298c 100644
--- a/api/units/data_rate_unittest.cc
+++ b/api/units/data_rate_unittest.cc
@@ -130,6 +130,9 @@
 
   EXPECT_EQ(rate_a / rate_b, static_cast<double>(kValueA) / kValueB);
 
+  EXPECT_EQ((rate_a / 10).bps(), kValueA / 10);
+  EXPECT_NEAR((rate_a / 0.5).bps(), kValueA * 2, 1);
+
   DataRate mutable_rate = DataRate::bps(kValueA);
   mutable_rate += rate_b;
   EXPECT_EQ(mutable_rate.bps(), kValueA + kValueB);
diff --git a/api/units/data_size.cc b/api/units/data_size.cc
index 4440f89..8a87786 100644
--- a/api/units/data_size.cc
+++ b/api/units/data_size.cc
@@ -14,7 +14,7 @@
 
 namespace webrtc {
 
-std::string ToString(const DataSize& value) {
+std::string ToString(DataSize value) {
   char buf[64];
   rtc::SimpleStringBuilder sb(buf);
   if (value.IsInfinite()) {
diff --git a/api/units/data_size.h b/api/units/data_size.h
index 8958b24..b4cbb65 100644
--- a/api/units/data_size.h
+++ b/api/units/data_size.h
@@ -15,143 +15,44 @@
 #include <ostream>  // no-presubmit-check TODO(webrtc:8982)
 #endif              // UNIT_TEST
 
-#include <stdint.h>
-#include <cmath>
-#include <limits>
 #include <string>
 #include <type_traits>
 
-#include "rtc_base/checks.h"
-#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/units/unit_base.h"
 
 namespace webrtc {
-namespace data_size_impl {
-constexpr int64_t kPlusInfinityVal = std::numeric_limits<int64_t>::max();
-}  // namespace data_size_impl
-
 // DataSize is a class represeting a count of bytes.
-class DataSize {
+class DataSize final : public rtc_units_impl::RelativeUnit<DataSize> {
  public:
   DataSize() = delete;
-  static constexpr DataSize Zero() { return DataSize(0); }
-  static constexpr DataSize Infinity() {
-    return DataSize(data_size_impl::kPlusInfinityVal);
-  }
+  static constexpr DataSize Infinity() { return PlusInfinity(); }
   template <int64_t bytes>
   static constexpr DataSize Bytes() {
-    static_assert(bytes >= 0, "");
-    static_assert(bytes < data_size_impl::kPlusInfinityVal, "");
-    return DataSize(bytes);
+    return FromStaticValue<bytes>();
   }
 
   template <
       typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+      typename std::enable_if<std::is_arithmetic<T>::value>::type* = nullptr>
   static DataSize bytes(T bytes) {
-    RTC_DCHECK_GE(bytes, 0);
-    RTC_DCHECK_LT(bytes, data_size_impl::kPlusInfinityVal);
-    return DataSize(rtc::dchecked_cast<int64_t>(bytes));
+    return FromValue(bytes);
   }
-
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static DataSize bytes(T bytes) {
-    if (bytes == std::numeric_limits<T>::infinity()) {
-      return Infinity();
-    } else {
-      RTC_DCHECK(!std::isnan(bytes));
-      RTC_DCHECK_GE(bytes, 0);
-      RTC_DCHECK_LT(bytes, data_size_impl::kPlusInfinityVal);
-      return DataSize(rtc::dchecked_cast<int64_t>(bytes));
-    }
-  }
-
   template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type bytes() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(bytes_);
-  }
-
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  bytes() const {
-    return IsInfinite() ? std::numeric_limits<T>::infinity() : bytes_;
+  typename std::enable_if<std::is_arithmetic<T>::value, T>::type bytes() const {
+    return ToValue<T>();
   }
 
   constexpr int64_t bytes_or(int64_t fallback_value) const {
-    return IsFinite() ? bytes_ : fallback_value;
-  }
-
-  constexpr bool IsZero() const { return bytes_ == 0; }
-  constexpr bool IsInfinite() const {
-    return bytes_ == data_size_impl::kPlusInfinityVal;
-  }
-  constexpr bool IsFinite() const { return !IsInfinite(); }
-  DataSize operator-(const DataSize& other) const {
-    return DataSize::bytes(bytes() - other.bytes());
-  }
-  DataSize operator+(const DataSize& other) const {
-    return DataSize::bytes(bytes() + other.bytes());
-  }
-  DataSize& operator-=(const DataSize& other) {
-    *this = *this - other;
-    return *this;
-  }
-  DataSize& operator+=(const DataSize& other) {
-    *this = *this + other;
-    return *this;
-  }
-  constexpr double operator/(const DataSize& other) const {
-    return bytes<double>() / other.bytes<double>();
-  }
-  constexpr bool operator==(const DataSize& other) const {
-    return bytes_ == other.bytes_;
-  }
-  constexpr bool operator!=(const DataSize& other) const {
-    return bytes_ != other.bytes_;
-  }
-  constexpr bool operator<=(const DataSize& other) const {
-    return bytes_ <= other.bytes_;
-  }
-  constexpr bool operator>=(const DataSize& other) const {
-    return bytes_ >= other.bytes_;
-  }
-  constexpr bool operator>(const DataSize& other) const {
-    return bytes_ > other.bytes_;
-  }
-  constexpr bool operator<(const DataSize& other) const {
-    return bytes_ < other.bytes_;
+    return ToValueOr(fallback_value);
   }
 
  private:
-  explicit constexpr DataSize(int64_t bytes) : bytes_(bytes) {}
-  int64_t bytes_;
+  friend class rtc_units_impl::UnitBase<DataSize>;
+  using RelativeUnit::RelativeUnit;
+  static constexpr bool one_sided = true;
 };
 
-inline DataSize operator*(const DataSize& size, const double& scalar) {
-  return DataSize::bytes(std::round(size.bytes() * scalar));
-}
-inline DataSize operator*(const double& scalar, const DataSize& size) {
-  return size * scalar;
-}
-inline DataSize operator*(const DataSize& size, const int64_t& scalar) {
-  return DataSize::bytes(size.bytes() * scalar);
-}
-inline DataSize operator*(const int64_t& scalar, const DataSize& size) {
-  return size * scalar;
-}
-inline DataSize operator*(const DataSize& size, const int32_t& scalar) {
-  return DataSize::bytes(size.bytes() * scalar);
-}
-inline DataSize operator*(const int32_t& scalar, const DataSize& size) {
-  return size * scalar;
-}
-inline DataSize operator/(const DataSize& size, const int64_t& scalar) {
-  return DataSize::bytes(size.bytes() / scalar);
-}
-
-std::string ToString(const DataSize& value);
+std::string ToString(DataSize value);
 
 #ifdef UNIT_TEST
 inline std::ostream& operator<<(  // no-presubmit-check TODO(webrtc:8982)
diff --git a/api/units/time_delta.cc b/api/units/time_delta.cc
index d38387a..f90451b 100644
--- a/api/units/time_delta.cc
+++ b/api/units/time_delta.cc
@@ -14,7 +14,7 @@
 
 namespace webrtc {
 
-std::string ToString(const TimeDelta& value) {
+std::string ToString(TimeDelta value) {
   char buf[64];
   rtc::SimpleStringBuilder sb(buf);
   if (value.IsPlusInfinity()) {
diff --git a/api/units/time_delta.h b/api/units/time_delta.h
index 74b5385..6458369 100644
--- a/api/units/time_delta.h
+++ b/api/units/time_delta.h
@@ -15,22 +15,13 @@
 #include <ostream>  // no-presubmit-check TODO(webrtc:8982)
 #endif              // UNIT_TEST
 
-#include <stdint.h>
-#include <cmath>
 #include <cstdlib>
-#include <limits>
 #include <string>
 #include <type_traits>
 
-#include "rtc_base/checks.h"
-#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/units/unit_base.h"
 
 namespace webrtc {
-namespace timedelta_impl {
-constexpr int64_t kPlusInfinityVal = std::numeric_limits<int64_t>::max();
-constexpr int64_t kMinusInfinityVal = std::numeric_limits<int64_t>::min();
-}  // namespace timedelta_impl
-
 // TimeDelta represents the difference between two timestamps. Commonly this can
 // be a duration. However since two Timestamps are not guaranteed to have the
 // same epoch (they might come from different computers, making exact
@@ -38,245 +29,69 @@
 // undefined. To simplify usage, it can be constructed and converted to
 // different units, specifically seconds (s), milliseconds (ms) and
 // microseconds (us).
-class TimeDelta {
+class TimeDelta final : public rtc_units_impl::RelativeUnit<TimeDelta> {
  public:
   TimeDelta() = delete;
-  static constexpr TimeDelta Zero() { return TimeDelta(0); }
-  static constexpr TimeDelta PlusInfinity() {
-    return TimeDelta(timedelta_impl::kPlusInfinityVal);
-  }
-  static constexpr TimeDelta MinusInfinity() {
-    return TimeDelta(timedelta_impl::kMinusInfinityVal);
-  }
   template <int64_t seconds>
   static constexpr TimeDelta Seconds() {
-    static_assert(seconds > timedelta_impl::kMinusInfinityVal / 1000000, "");
-    static_assert(seconds < timedelta_impl::kPlusInfinityVal / 1000000, "");
-    return TimeDelta(seconds * 1000000);
+    return FromStaticFraction<seconds, 1000000>();
   }
   template <int64_t ms>
   static constexpr TimeDelta Millis() {
-    static_assert(ms > timedelta_impl::kMinusInfinityVal / 1000, "");
-    static_assert(ms < timedelta_impl::kPlusInfinityVal / 1000, "");
-    return TimeDelta(ms * 1000);
+    return FromStaticFraction<ms, 1000>();
   }
   template <int64_t us>
   static constexpr TimeDelta Micros() {
-    static_assert(us > timedelta_impl::kMinusInfinityVal, "");
-    static_assert(us < timedelta_impl::kPlusInfinityVal, "");
-    return TimeDelta(us);
+    return FromStaticValue<us>();
   }
-
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  template <typename T>
   static TimeDelta seconds(T seconds) {
-    RTC_DCHECK_GT(seconds, timedelta_impl::kMinusInfinityVal / 1000000);
-    RTC_DCHECK_LT(seconds, timedelta_impl::kPlusInfinityVal / 1000000);
-    return TimeDelta(rtc::dchecked_cast<int64_t>(seconds) * 1000000);
+    return FromFraction<1000000>(seconds);
   }
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  template <typename T>
   static TimeDelta ms(T milliseconds) {
-    RTC_DCHECK_GT(milliseconds, timedelta_impl::kMinusInfinityVal / 1000);
-    RTC_DCHECK_LT(milliseconds, timedelta_impl::kPlusInfinityVal / 1000);
-    return TimeDelta(rtc::dchecked_cast<int64_t>(milliseconds) * 1000);
+    return FromFraction<1000>(milliseconds);
   }
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  template <typename T>
   static TimeDelta us(T microseconds) {
-    RTC_DCHECK_GT(microseconds, timedelta_impl::kMinusInfinityVal);
-    RTC_DCHECK_LT(microseconds, timedelta_impl::kPlusInfinityVal);
-    return TimeDelta(rtc::dchecked_cast<int64_t>(microseconds));
-  }
-
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static TimeDelta seconds(T seconds) {
-    return TimeDelta::us(seconds * 1e6);
-  }
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static TimeDelta ms(T milliseconds) {
-    return TimeDelta::us(milliseconds * 1e3);
-  }
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static TimeDelta us(T microseconds) {
-    if (microseconds == std::numeric_limits<T>::infinity()) {
-      return PlusInfinity();
-    } else if (microseconds == -std::numeric_limits<T>::infinity()) {
-      return MinusInfinity();
-    } else {
-      RTC_DCHECK(!std::isnan(microseconds));
-      RTC_DCHECK_GT(microseconds, timedelta_impl::kMinusInfinityVal);
-      RTC_DCHECK_LT(microseconds, timedelta_impl::kPlusInfinityVal);
-      return TimeDelta(rtc::dchecked_cast<int64_t>(microseconds));
-    }
-  }
-
-  template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type seconds() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(UnsafeSeconds());
+    return FromValue(microseconds);
   }
   template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type ms() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(UnsafeMillis());
+  T seconds() const {
+    return ToFraction<1000000, T>();
   }
   template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type us() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(microseconds_);
+  T ms() const {
+    return ToFraction<1000, T>();
   }
   template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type ns() const {
-    RTC_DCHECK_GE(us(), std::numeric_limits<T>::min() / 1000);
-    RTC_DCHECK_LE(us(), std::numeric_limits<T>::max() / 1000);
-    return rtc::dchecked_cast<T>(us() * 1000);
+  T us() const {
+    return ToValue<T>();
   }
-
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  seconds() const {
-    return us<T>() * 1e-6;
-  }
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  ms() const {
-    return us<T>() * 1e-3;
-  }
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  us() const {
-    return IsPlusInfinity()
-               ? std::numeric_limits<T>::infinity()
-               : IsMinusInfinity() ? -std::numeric_limits<T>::infinity()
-                                   : microseconds_;
-  }
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  ns() const {
-    return us<T>() * 1e3;
+  template <typename T = int64_t>
+  T ns() const {
+    return ToMultiple<1000, T>();
   }
 
   constexpr int64_t seconds_or(int64_t fallback_value) const {
-    return IsFinite() ? UnsafeSeconds() : fallback_value;
+    return ToFractionOr<1000000>(fallback_value);
   }
   constexpr int64_t ms_or(int64_t fallback_value) const {
-    return IsFinite() ? UnsafeMillis() : fallback_value;
+    return ToFractionOr<1000>(fallback_value);
   }
   constexpr int64_t us_or(int64_t fallback_value) const {
-    return IsFinite() ? microseconds_ : fallback_value;
+    return ToValueOr(fallback_value);
   }
 
   TimeDelta Abs() const { return TimeDelta::us(std::abs(us())); }
-  constexpr bool IsZero() const { return microseconds_ == 0; }
-  constexpr bool IsFinite() const { return !IsInfinite(); }
-  constexpr bool IsInfinite() const {
-    return microseconds_ == timedelta_impl::kPlusInfinityVal ||
-           microseconds_ == timedelta_impl::kMinusInfinityVal;
-  }
-  constexpr bool IsPlusInfinity() const {
-    return microseconds_ == timedelta_impl::kPlusInfinityVal;
-  }
-  constexpr bool IsMinusInfinity() const {
-    return microseconds_ == timedelta_impl::kMinusInfinityVal;
-  }
-  TimeDelta operator+(const TimeDelta& other) const {
-    if (IsPlusInfinity() || other.IsPlusInfinity()) {
-      RTC_DCHECK(!IsMinusInfinity());
-      RTC_DCHECK(!other.IsMinusInfinity());
-      return PlusInfinity();
-    } else if (IsMinusInfinity() || other.IsMinusInfinity()) {
-      RTC_DCHECK(!IsPlusInfinity());
-      RTC_DCHECK(!other.IsPlusInfinity());
-      return MinusInfinity();
-    }
-    return TimeDelta::us(us() + other.us());
-  }
-  TimeDelta operator-(const TimeDelta& other) const {
-    if (IsPlusInfinity() || other.IsMinusInfinity()) {
-      RTC_DCHECK(!IsMinusInfinity());
-      RTC_DCHECK(!other.IsPlusInfinity());
-      return PlusInfinity();
-    } else if (IsMinusInfinity() || other.IsPlusInfinity()) {
-      RTC_DCHECK(!IsPlusInfinity());
-      RTC_DCHECK(!other.IsMinusInfinity());
-      return MinusInfinity();
-    }
-    return TimeDelta::us(us() - other.us());
-  }
-  TimeDelta& operator-=(const TimeDelta& other) {
-    *this = *this - other;
-    return *this;
-  }
-  TimeDelta& operator+=(const TimeDelta& other) {
-    *this = *this + other;
-    return *this;
-  }
-  constexpr double operator/(const TimeDelta& other) const {
-    return us<double>() / other.us<double>();
-  }
-  constexpr bool operator==(const TimeDelta& other) const {
-    return microseconds_ == other.microseconds_;
-  }
-  constexpr bool operator!=(const TimeDelta& other) const {
-    return microseconds_ != other.microseconds_;
-  }
-  constexpr bool operator<=(const TimeDelta& other) const {
-    return microseconds_ <= other.microseconds_;
-  }
-  constexpr bool operator>=(const TimeDelta& other) const {
-    return microseconds_ >= other.microseconds_;
-  }
-  constexpr bool operator>(const TimeDelta& other) const {
-    return microseconds_ > other.microseconds_;
-  }
-  constexpr bool operator<(const TimeDelta& other) const {
-    return microseconds_ < other.microseconds_;
-  }
 
  private:
-  explicit constexpr TimeDelta(int64_t us) : microseconds_(us) {}
-  constexpr int64_t UnsafeSeconds() const {
-    return (microseconds_ + (microseconds_ >= 0 ? 500000 : -500000)) / 1000000;
-  }
-  constexpr int64_t UnsafeMillis() const {
-    return (microseconds_ + (microseconds_ >= 0 ? 500 : -500)) / 1000;
-  }
-  int64_t microseconds_;
+  friend class rtc_units_impl::UnitBase<TimeDelta>;
+  using RelativeUnit::RelativeUnit;
+  static constexpr bool one_sided = false;
 };
 
-inline TimeDelta operator*(const TimeDelta& delta, const double& scalar) {
-  return TimeDelta::us(std::round(delta.us() * scalar));
-}
-inline TimeDelta operator*(const double& scalar, const TimeDelta& delta) {
-  return delta * scalar;
-}
-inline TimeDelta operator*(const TimeDelta& delta, const int64_t& scalar) {
-  return TimeDelta::us(delta.us() * scalar);
-}
-inline TimeDelta operator*(const int64_t& scalar, const TimeDelta& delta) {
-  return delta * scalar;
-}
-inline TimeDelta operator*(const TimeDelta& delta, const int32_t& scalar) {
-  return TimeDelta::us(delta.us() * scalar);
-}
-inline TimeDelta operator*(const int32_t& scalar, const TimeDelta& delta) {
-  return delta * scalar;
-}
-
-inline TimeDelta operator/(const TimeDelta& delta, const int64_t& scalar) {
-  return TimeDelta::us(delta.us() / scalar);
-}
-std::string ToString(const TimeDelta& value);
+std::string ToString(TimeDelta value);
 
 #ifdef UNIT_TEST
 inline std::ostream& operator<<(  // no-presubmit-check TODO(webrtc:8982)
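
An illustrative sketch of the slimmed-down TimeDelta surface (not part of the patch; the factories, conversions and Clamped() used here all appear in the header or unit test in this diff):

```cpp
#include <cassert>

#include "api/units/time_delta.h"

int main() {
  using webrtc::TimeDelta;
  const TimeDelta rtt = TimeDelta::ms(250);   // FromFraction<1000>() under the hood
  assert(rtt.us() == 250000);                 // integral conversion via ToValue<T>()
  assert(rtt.seconds<double>() == 0.25);      // floating-point conversion via ToFraction
  // ms_or() returns the fallback for the infinities handled by UnitBase.
  assert(TimeDelta::PlusInfinity().ms_or(-1) == -1);
  // Clamp/Clamped come from RelativeUnit and are exercised by the new unit test.
  const TimeDelta clamped = rtt.Clamped(TimeDelta::ms(100), TimeDelta::ms(200));
  assert(clamped == TimeDelta::ms(200));
  return 0;
}
```
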
diff --git a/api/units/time_delta_unittest.cc b/api/units/time_delta_unittest.cc
index bf8bbce..a46ba83 100644
--- a/api/units/time_delta_unittest.cc
+++ b/api/units/time_delta_unittest.cc
@@ -10,6 +10,8 @@
 
 #include "api/units/time_delta.h"
 
+#include <limits>
+
 #include "test/gtest.h"
 
 namespace webrtc {
@@ -106,6 +108,27 @@
   EXPECT_LT(TimeDelta::MinusInfinity(), TimeDelta::Zero());
 }
 
+TEST(TimeDeltaTest, Clamping) {
+  const TimeDelta upper = TimeDelta::ms(800);
+  const TimeDelta lower = TimeDelta::ms(100);
+  const TimeDelta under = TimeDelta::ms(100);
+  const TimeDelta inside = TimeDelta::ms(500);
+  const TimeDelta over = TimeDelta::ms(1000);
+  EXPECT_EQ(under.Clamped(lower, upper), lower);
+  EXPECT_EQ(inside.Clamped(lower, upper), inside);
+  EXPECT_EQ(over.Clamped(lower, upper), upper);
+
+  TimeDelta mutable_delta = lower;
+  mutable_delta.Clamp(lower, upper);
+  EXPECT_EQ(mutable_delta, lower);
+  mutable_delta = inside;
+  mutable_delta.Clamp(lower, upper);
+  EXPECT_EQ(mutable_delta, inside);
+  mutable_delta = over;
+  mutable_delta.Clamp(lower, upper);
+  EXPECT_EQ(mutable_delta, upper);
+}
+
 TEST(TimeDeltaTest, CanBeInititializedFromLargeInt) {
   const int kMaxInt = std::numeric_limits<int>::max();
   EXPECT_EQ(TimeDelta::seconds(kMaxInt).us(),
diff --git a/api/units/timestamp.cc b/api/units/timestamp.cc
index feb1447..d3417cf 100644
--- a/api/units/timestamp.cc
+++ b/api/units/timestamp.cc
@@ -13,7 +13,7 @@
 #include "rtc_base/strings/string_builder.h"
 
 namespace webrtc {
-std::string ToString(const Timestamp& value) {
+std::string ToString(Timestamp value) {
   char buf[64];
   rtc::SimpleStringBuilder sb(buf);
   if (value.IsInfinite()) {
diff --git a/api/units/timestamp.h b/api/units/timestamp.h
index 80f1839..a6e450f 100644
--- a/api/units/timestamp.h
+++ b/api/units/timestamp.h
@@ -15,191 +15,94 @@
 #include <ostream>  // no-presubmit-check TODO(webrtc:8982)
 #endif              // UNIT_TEST
 
-#include <math.h>
-#include <stdint.h>
-#include <limits>
 #include <string>
 #include <type_traits>
 
 #include "api/units/time_delta.h"
 #include "rtc_base/checks.h"
-#include "rtc_base/numerics/safe_conversions.h"
 
 namespace webrtc {
-namespace timestamp_impl {
-constexpr int64_t kPlusInfinityVal = std::numeric_limits<int64_t>::max();
-constexpr int64_t kMinusInfinityVal = std::numeric_limits<int64_t>::min();
-}  // namespace timestamp_impl
-
 // Timestamp represents the time that has passed since some unspecified epoch.
 // The epoch is assumed to be before any represented timestamps, this means that
 // negative values are not valid. The most notable feature is that the
 // difference of two Timestamps results in a TimeDelta.
-class Timestamp {
+class Timestamp final : public rtc_units_impl::UnitBase<Timestamp> {
  public:
   Timestamp() = delete;
-  static constexpr Timestamp PlusInfinity() {
-    return Timestamp(timestamp_impl::kPlusInfinityVal);
-  }
-  static constexpr Timestamp MinusInfinity() {
-    return Timestamp(timestamp_impl::kMinusInfinityVal);
-  }
+
   template <int64_t seconds>
   static constexpr Timestamp Seconds() {
-    static_assert(seconds >= 0, "");
-    static_assert(seconds < timestamp_impl::kPlusInfinityVal / 1000000, "");
-    return Timestamp(seconds * 1000000);
+    return FromStaticFraction<seconds, 1000000>();
   }
   template <int64_t ms>
   static constexpr Timestamp Millis() {
-    static_assert(ms >= 0, "");
-    static_assert(ms < timestamp_impl::kPlusInfinityVal / 1000, "");
-    return Timestamp(ms * 1000);
+    return FromStaticFraction<ms, 1000>();
   }
   template <int64_t us>
   static constexpr Timestamp Micros() {
-    static_assert(us >= 0, "");
-    static_assert(us < timestamp_impl::kPlusInfinityVal, "");
-    return Timestamp(us);
+    return FromStaticValue<us>();
   }
 
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  template <typename T>
   static Timestamp seconds(T seconds) {
-    RTC_DCHECK_GE(seconds, 0);
-    RTC_DCHECK_LT(seconds, timestamp_impl::kPlusInfinityVal / 1000000);
-    return Timestamp(rtc::dchecked_cast<int64_t>(seconds) * 1000000);
+    return FromFraction<1000000>(seconds);
   }
-
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  template <typename T>
   static Timestamp ms(T milliseconds) {
-    RTC_DCHECK_GE(milliseconds, 0);
-    RTC_DCHECK_LT(milliseconds, timestamp_impl::kPlusInfinityVal / 1000);
-    return Timestamp(rtc::dchecked_cast<int64_t>(milliseconds) * 1000);
+    return FromFraction<1000>(milliseconds);
   }
-
-  template <
-      typename T,
-      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  template <typename T>
   static Timestamp us(T microseconds) {
-    RTC_DCHECK_GE(microseconds, 0);
-    RTC_DCHECK_LT(microseconds, timestamp_impl::kPlusInfinityVal);
-    return Timestamp(rtc::dchecked_cast<int64_t>(microseconds));
-  }
-
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static Timestamp seconds(T seconds) {
-    return Timestamp::us(seconds * 1e6);
-  }
-
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static Timestamp ms(T milliseconds) {
-    return Timestamp::us(milliseconds * 1e3);
-  }
-  template <typename T,
-            typename std::enable_if<std::is_floating_point<T>::value>::type* =
-                nullptr>
-  static Timestamp us(T microseconds) {
-    if (microseconds == std::numeric_limits<double>::infinity()) {
-      return PlusInfinity();
-    } else if (microseconds == -std::numeric_limits<double>::infinity()) {
-      return MinusInfinity();
-    } else {
-      RTC_DCHECK(!std::isnan(microseconds));
-      RTC_DCHECK_GE(microseconds, 0);
-      RTC_DCHECK_LT(microseconds, timestamp_impl::kPlusInfinityVal);
-      return Timestamp(rtc::dchecked_cast<int64_t>(microseconds));
-    }
-  }
-
-  template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type seconds() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(UnsafeSeconds());
+    return FromValue(microseconds);
   }
   template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type ms() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(UnsafeMillis());
+  T seconds() const {
+    return ToFraction<1000000, T>();
   }
   template <typename T = int64_t>
-  typename std::enable_if<std::is_integral<T>::value, T>::type us() const {
-    RTC_DCHECK(IsFinite());
-    return rtc::dchecked_cast<T>(microseconds_);
+  T ms() const {
+    return ToFraction<1000, T>();
   }
-
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  seconds() const {
-    return us<T>() * 1e-6;
-  }
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  ms() const {
-    return us<T>() * 1e-3;
-  }
-  template <typename T>
-  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
-  us() const {
-    return IsPlusInfinity()
-               ? std::numeric_limits<T>::infinity()
-               : IsMinusInfinity() ? -std::numeric_limits<T>::infinity()
-                                   : microseconds_;
+  template <typename T = int64_t>
+  T us() const {
+    return ToValue<T>();
   }
 
   constexpr int64_t seconds_or(int64_t fallback_value) const {
-    return IsFinite() ? UnsafeSeconds() : fallback_value;
+    return ToFractionOr<1000000>(fallback_value);
   }
   constexpr int64_t ms_or(int64_t fallback_value) const {
-    return IsFinite() ? UnsafeMillis() : fallback_value;
+    return ToFractionOr<1000>(fallback_value);
   }
   constexpr int64_t us_or(int64_t fallback_value) const {
-    return IsFinite() ? microseconds_ : fallback_value;
+    return ToValueOr(fallback_value);
   }
 
-  constexpr bool IsFinite() const { return !IsInfinite(); }
-  constexpr bool IsInfinite() const {
-    return microseconds_ == timedelta_impl::kPlusInfinityVal ||
-           microseconds_ == timedelta_impl::kMinusInfinityVal;
-  }
-  constexpr bool IsPlusInfinity() const {
-    return microseconds_ == timedelta_impl::kPlusInfinityVal;
-  }
-  constexpr bool IsMinusInfinity() const {
-    return microseconds_ == timedelta_impl::kMinusInfinityVal;
-  }
-  Timestamp operator+(const TimeDelta& other) const {
-    if (IsPlusInfinity() || other.IsPlusInfinity()) {
+  Timestamp operator+(const TimeDelta delta) const {
+    if (IsPlusInfinity() || delta.IsPlusInfinity()) {
       RTC_DCHECK(!IsMinusInfinity());
-      RTC_DCHECK(!other.IsMinusInfinity());
+      RTC_DCHECK(!delta.IsMinusInfinity());
       return PlusInfinity();
-    } else if (IsMinusInfinity() || other.IsMinusInfinity()) {
+    } else if (IsMinusInfinity() || delta.IsMinusInfinity()) {
       RTC_DCHECK(!IsPlusInfinity());
-      RTC_DCHECK(!other.IsPlusInfinity());
+      RTC_DCHECK(!delta.IsPlusInfinity());
       return MinusInfinity();
     }
-    return Timestamp::us(us() + other.us());
+    return Timestamp::us(us() + delta.us());
   }
-  Timestamp operator-(const TimeDelta& other) const {
-    if (IsPlusInfinity() || other.IsMinusInfinity()) {
+  Timestamp operator-(const TimeDelta delta) const {
+    if (IsPlusInfinity() || delta.IsMinusInfinity()) {
       RTC_DCHECK(!IsMinusInfinity());
-      RTC_DCHECK(!other.IsPlusInfinity());
+      RTC_DCHECK(!delta.IsPlusInfinity());
       return PlusInfinity();
-    } else if (IsMinusInfinity() || other.IsPlusInfinity()) {
+    } else if (IsMinusInfinity() || delta.IsPlusInfinity()) {
       RTC_DCHECK(!IsPlusInfinity());
-      RTC_DCHECK(!other.IsMinusInfinity());
+      RTC_DCHECK(!delta.IsMinusInfinity());
       return MinusInfinity();
     }
-    return Timestamp::us(us() - other.us());
+    return Timestamp::us(us() - delta.us());
   }
-  TimeDelta operator-(const Timestamp& other) const {
+  TimeDelta operator-(const Timestamp other) const {
     if (IsPlusInfinity() || other.IsMinusInfinity()) {
       RTC_DCHECK(!IsMinusInfinity());
       RTC_DCHECK(!other.IsPlusInfinity());
@@ -211,45 +114,22 @@
     }
     return TimeDelta::us(us() - other.us());
   }
-  Timestamp& operator-=(const TimeDelta& other) {
-    *this = *this - other;
+  Timestamp& operator-=(const TimeDelta delta) {
+    *this = *this - delta;
     return *this;
   }
-  Timestamp& operator+=(const TimeDelta& other) {
-    *this = *this + other;
+  Timestamp& operator+=(const TimeDelta delta) {
+    *this = *this + delta;
     return *this;
   }
-  constexpr bool operator==(const Timestamp& other) const {
-    return microseconds_ == other.microseconds_;
-  }
-  constexpr bool operator!=(const Timestamp& other) const {
-    return microseconds_ != other.microseconds_;
-  }
-  constexpr bool operator<=(const Timestamp& other) const {
-    return microseconds_ <= other.microseconds_;
-  }
-  constexpr bool operator>=(const Timestamp& other) const {
-    return microseconds_ >= other.microseconds_;
-  }
-  constexpr bool operator>(const Timestamp& other) const {
-    return microseconds_ > other.microseconds_;
-  }
-  constexpr bool operator<(const Timestamp& other) const {
-    return microseconds_ < other.microseconds_;
-  }
 
  private:
-  explicit constexpr Timestamp(int64_t us) : microseconds_(us) {}
-  constexpr int64_t UnsafeSeconds() const {
-    return (microseconds_ + 500000) / 1000000;
-  }
-  constexpr int64_t UnsafeMillis() const {
-    return (microseconds_ + 500) / 1000;
-  }
-  int64_t microseconds_;
+  friend class rtc_units_impl::UnitBase<Timestamp>;
+  using UnitBase::UnitBase;
+  static constexpr bool one_sided = true;
 };
 
-std::string ToString(const Timestamp& value);
+std::string ToString(Timestamp value);
 
 #ifdef UNIT_TEST
 inline std::ostream& operator<<(  // no-presubmit-check TODO(webrtc:8982)
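
A usage sketch of Timestamp arithmetic after the move to UnitBase (illustrative only; the mixed Timestamp/TimeDelta operators remain explicit members as shown above):

```cpp
#include <cassert>

#include "api/units/time_delta.h"
#include "api/units/timestamp.h"

int main() {
  using webrtc::TimeDelta;
  using webrtc::Timestamp;
  const Timestamp start = Timestamp::seconds(10);
  const Timestamp later = start + TimeDelta::ms(500);  // Timestamp + TimeDelta
  assert(later.ms() == 10500);                         // conversion via ToFraction
  assert(later - start == TimeDelta::ms(500));         // Timestamp - Timestamp -> TimeDelta
  return 0;
}
```
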
diff --git a/api/video/color_space.cc b/api/video/color_space.cc
index a8be5cd..ad138ab 100644
--- a/api/video/color_space.cc
+++ b/api/video/color_space.cc
@@ -10,18 +10,72 @@
 
 #include "api/video/color_space.h"
 
+namespace {
+// Try to convert |enum_value| into the enum class T. |enum_bitmask| is created
+// by the function below. Returns true if conversion was successful, false
+// otherwise.
+template <typename T>
+bool SetFromUint8(uint8_t enum_value, uint64_t enum_bitmask, T* out) {
+  if ((enum_value < 64) && ((enum_bitmask >> enum_value) & 1)) {
+    *out = static_cast<T>(enum_value);
+    return true;
+  }
+  return false;
+}
+
+// This function serves as an assert for the constexpr function below. It is
+// deliberately not constexpr, so that it causes a build error if enum values
+// values of 64 or above are used. The bitmask and the code generating it would
+// have to be extended if the standard is updated to include enum values >= 64.
+int EnumMustBeLessThan64() {
+  return -1;
+}
+
+template <typename T, size_t N>
+constexpr int MakeMask(const int index, const int length, T (&values)[N]) {
+  return length > 1
+             ? (MakeMask(index, 1, values) +
+                MakeMask(index + 1, length - 1, values))
+             : (static_cast<uint8_t>(values[index]) < 64
+                    ? (uint64_t{1} << static_cast<uint8_t>(values[index]))
+                    : EnumMustBeLessThan64());
+}
+
+// Create a bitmask where each bit corresponds to one potential enum value.
+// |values| should be an array listing all possible enum values. The bit is set
+// to one if the corresponding enum exists. Only works for enums with values
+// less than 64.
+template <typename T, size_t N>
+constexpr uint64_t CreateEnumBitmask(T (&values)[N]) {
+  return MakeMask(0, N, values);
+}
+
+}  // namespace
+
 namespace webrtc {
 
 ColorSpace::ColorSpace() = default;
+ColorSpace::ColorSpace(const ColorSpace& other) = default;
+ColorSpace::ColorSpace(ColorSpace&& other) = default;
+ColorSpace& ColorSpace::operator=(const ColorSpace& other) = default;
 
 ColorSpace::ColorSpace(PrimaryID primaries,
                        TransferID transfer,
                        MatrixID matrix,
                        RangeID range)
+    : ColorSpace(primaries, transfer, matrix, range, nullptr) {}
+
+ColorSpace::ColorSpace(PrimaryID primaries,
+                       TransferID transfer,
+                       MatrixID matrix,
+                       RangeID range,
+                       const HdrMetadata* hdr_metadata)
     : primaries_(primaries),
       transfer_(transfer),
       matrix_(matrix),
-      range_(range) {}
+      range_(range),
+      hdr_metadata_(hdr_metadata ? absl::make_optional(*hdr_metadata)
+                                 : absl::nullopt) {}
 
 ColorSpace::PrimaryID ColorSpace::primaries() const {
   return primaries_;
@@ -39,4 +93,61 @@
   return range_;
 }
 
+const HdrMetadata* ColorSpace::hdr_metadata() const {
+  return hdr_metadata_ ? &*hdr_metadata_ : nullptr;
+}
+
+bool ColorSpace::set_primaries_from_uint8(uint8_t enum_value) {
+  constexpr PrimaryID kPrimaryIds[] = {
+      PrimaryID::kInvalid,    PrimaryID::kBT709,      PrimaryID::kUNSPECIFIED,
+      PrimaryID::kBT470M,     PrimaryID::kBT470BG,    PrimaryID::kSMPTE170M,
+      PrimaryID::kSMPTE240M,  PrimaryID::kFILM,       PrimaryID::kBT2020,
+      PrimaryID::kSMPTEST428, PrimaryID::kSMPTEST431, PrimaryID::kSMPTEST432,
+      PrimaryID::kJEDECP22};
+  constexpr uint64_t enum_bitmask = CreateEnumBitmask(kPrimaryIds);
+
+  return SetFromUint8(enum_value, enum_bitmask, &primaries_);
+}
+
+bool ColorSpace::set_transfer_from_uint8(uint8_t enum_value) {
+  constexpr TransferID kTransferIds[] = {
+      TransferID::kInvalid,      TransferID::kBT709,
+      TransferID::kUNSPECIFIED,  TransferID::kGAMMA22,
+      TransferID::kGAMMA28,      TransferID::kSMPTE170M,
+      TransferID::kSMPTE240M,    TransferID::kLINEAR,
+      TransferID::kLOG,          TransferID::kLOG_SQRT,
+      TransferID::kIEC61966_2_4, TransferID::kBT1361_ECG,
+      TransferID::kIEC61966_2_1, TransferID::kBT2020_10,
+      TransferID::kBT2020_12,    TransferID::kSMPTEST2084,
+      TransferID::kSMPTEST428,   TransferID::kARIB_STD_B67};
+  constexpr uint64_t enum_bitmask = CreateEnumBitmask(kTransferIds);
+
+  return SetFromUint8(enum_value, enum_bitmask, &transfer_);
+}
+
+bool ColorSpace::set_matrix_from_uint8(uint8_t enum_value) {
+  constexpr MatrixID kMatrixIds[] = {
+      MatrixID::kRGB,       MatrixID::kBT709,        MatrixID::kUNSPECIFIED,
+      MatrixID::kFCC,       MatrixID::kBT470BG,      MatrixID::kSMPTE170M,
+      MatrixID::kSMPTE240M, MatrixID::kYCOCG,        MatrixID::kBT2020_NCL,
+      MatrixID::kBT2020_CL, MatrixID::kSMPTE2085,    MatrixID::kCDNCLS,
+      MatrixID::kCDCLS,     MatrixID::kBT2100_ICTCP, MatrixID::kInvalid};
+  constexpr uint64_t enum_bitmask = CreateEnumBitmask(kMatrixIds);
+
+  return SetFromUint8(enum_value, enum_bitmask, &matrix_);
+}
+
+bool ColorSpace::set_range_from_uint8(uint8_t enum_value) {
+  constexpr RangeID kRangeIds[] = {RangeID::kInvalid, RangeID::kLimited,
+                                   RangeID::kFull, RangeID::kDerived};
+  constexpr uint64_t enum_bitmask = CreateEnumBitmask(kRangeIds);
+
+  return SetFromUint8(enum_value, enum_bitmask, &range_);
+}
+
+void ColorSpace::set_hdr_metadata(const HdrMetadata* hdr_metadata) {
+  hdr_metadata_ =
+      hdr_metadata ? absl::make_optional(*hdr_metadata) : absl::nullopt;
+}
+
 }  // namespace webrtc
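
A standalone sketch of the enum-bitmask validation pattern introduced above (illustrative only; it re-implements the idea with a C++14 constexpr loop instead of the recursive MakeMask, and the Primary enum is a cut-down stand-in rather than the real PrimaryID):

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

enum class Primary : uint8_t { kInvalid = 0, kBT709 = 1, kUnspecified = 2, kBT470M = 4 };

// One bit per valid enum value below 64.
template <typename T, size_t N>
constexpr uint64_t CreateEnumBitmask(const T (&values)[N]) {
  uint64_t mask = 0;
  for (size_t i = 0; i < N; ++i)
    mask |= uint64_t{1} << static_cast<uint8_t>(values[i]);
  return mask;
}

// Accepts a wire value only if its bit is set; otherwise leaves |out| untouched.
template <typename T>
bool SetFromUint8(uint8_t enum_value, uint64_t enum_bitmask, T* out) {
  if (enum_value < 64 && ((enum_bitmask >> enum_value) & 1)) {
    *out = static_cast<T>(enum_value);
    return true;
  }
  return false;
}

int main() {
  constexpr Primary kPrimaries[] = {Primary::kInvalid, Primary::kBT709,
                                    Primary::kUnspecified, Primary::kBT470M};
  constexpr uint64_t mask = CreateEnumBitmask(kPrimaries);
  Primary primary = Primary::kInvalid;
  assert(SetFromUint8(1, mask, &primary) && primary == Primary::kBT709);
  assert(!SetFromUint8(3, mask, &primary));  // 3 is a gap in the value table.
  return 0;
}
```
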
diff --git a/api/video/color_space.h b/api/video/color_space.h
index 8102647..79a15f5 100644
--- a/api/video/color_space.h
+++ b/api/video/color_space.h
@@ -11,95 +11,145 @@
 #ifndef API_VIDEO_COLOR_SPACE_H_
 #define API_VIDEO_COLOR_SPACE_H_
 
+#include <stdint.h>
+
+#include "absl/types/optional.h"
+#include "api/video/hdr_metadata.h"
+
 namespace webrtc {
 
-// Used to represent a color space for the purpose of color conversion. This
-// class only represents color information that can be transferred through the
-// bitstream of WebRTC's internal supported codecs:
+// This class represents color information as specified in T-REC H.273,
+// available from https://www.itu.int/rec/T-REC-H.273.
+//
+// WebRTC's supported codecs:
 // - VP9 supports color profiles, see VP9 Bitstream & Decoding Process
 // Specification Version 0.6 Section 7.2.2 "Color config semantics" available
 // from https://www.webmproject.org.
-// TODO(emircan): Extract these values from decode and add to the existing ones.
 // - VP8 only supports BT.601, see
 // https://tools.ietf.org/html/rfc6386#section-9.2
-// - H264 supports different color primaries, transfer characteristics, matrix
-// coefficients and range. See T-REC-H.264 E.2.1, "VUI parameters semantics",
-// available from https://www.itu.int/rec/T-REC-H.264.
+// - H264 uses the exact same representation as T-REC H.273. See T-REC-H.264
+// E.2.1, "VUI parameters semantics", available from
+// https://www.itu.int/rec/T-REC-H.264.
+
 class ColorSpace {
  public:
-  enum class PrimaryID {
-    kInvalid,
-    kBT709,
-    kBT470M,
-    kBT470BG,
-    kSMPTE170M,  // Identical to BT601
-    kSMPTE240M,
-    kFILM,
-    kBT2020,
-    kSMPTEST428,
-    kSMPTEST431,
-    kSMPTEST432,
-    kJEDECP22,
+  enum class PrimaryID : uint8_t {
+    // The indices are equal to the values specified in T-REC H.273 Table 2.
+    kInvalid = 0,
+    kBT709 = 1,
+    kUNSPECIFIED = 2,
+    kBT470M = 4,
+    kBT470BG = 5,
+    kSMPTE170M = 6,  // Identical to BT601
+    kSMPTE240M = 7,
+    kFILM = 8,
+    kBT2020 = 9,
+    kSMPTEST428 = 10,
+    kSMPTEST431 = 11,
+    kSMPTEST432 = 12,
+    kJEDECP22 = 22,  // Identical to EBU3213-E
+    // When adding/removing entries here, please make sure to do the
+    // corresponding change to kPrimaryIds.
   };
 
-  enum class TransferID {
-    kInvalid,
-    kBT709,
-    kGAMMA22,
-    kGAMMA28,
-    kSMPTE170M,
-    kSMPTE240M,
-    kLINEAR,
-    kLOG,
-    kLOG_SQRT,
-    kIEC61966_2_4,
-    kBT1361_ECG,
-    kIEC61966_2_1,
-    kBT2020_10,
-    kBT2020_12,
-    kSMPTEST2084,
-    kSMPTEST428,
-    kARIB_STD_B67,
+  enum class TransferID : uint8_t {
+    // The indices are equal to the values specified in T-REC H.273 Table 3.
+    kInvalid = 0,
+    kBT709 = 1,
+    kUNSPECIFIED = 2,
+    kGAMMA22 = 4,
+    kGAMMA28 = 5,
+    kSMPTE170M = 6,
+    kSMPTE240M = 7,
+    kLINEAR = 8,
+    kLOG = 9,
+    kLOG_SQRT = 10,
+    kIEC61966_2_4 = 11,
+    kBT1361_ECG = 12,
+    kIEC61966_2_1 = 13,
+    kBT2020_10 = 14,
+    kBT2020_12 = 15,
+    kSMPTEST2084 = 16,
+    kSMPTEST428 = 17,
+    kARIB_STD_B67 = 18,
+    // When adding/removing entries here, please make sure to do the
+    // corresponding change to kTransferIds.
   };
 
-  enum class MatrixID {
-    kInvalid,
-    kRGB,
-    kBT709,
-    kFCC,
-    kBT470BG,
-    kSMPTE170M,
-    kSMPTE240M,
-    kYCOCG,
-    kBT2020_NCL,
-    kBT2020_CL,
-    kSMPTE2085,
+  enum class MatrixID : uint8_t {
+    // The indices are equal to the values specified in T-REC H.273 Table 4.
+    kRGB = 0,
+    kBT709 = 1,
+    kUNSPECIFIED = 2,
+    kFCC = 4,
+    kBT470BG = 5,
+    kSMPTE170M = 6,
+    kSMPTE240M = 7,
+    kYCOCG = 8,
+    kBT2020_NCL = 9,
+    kBT2020_CL = 10,
+    kSMPTE2085 = 11,
+    kCDNCLS = 12,
+    kCDCLS = 13,
+    kBT2100_ICTCP = 14,
+    kInvalid = 63,
+    // When adding/removing entries here, please make sure to do the
+    // corresponding change to kMatrixIds.
   };
 
   enum class RangeID {
-    kInvalid,
+    // The indices are equal to the values specified at
+    // https://www.webmproject.org/docs/container/#colour for the element Range.
+    kInvalid = 0,
     // Limited Rec. 709 color range with RGB values ranging from 16 to 235.
-    kLimited,
+    kLimited = 1,
     // Full RGB color range with RGB values from 0 to 255.
-    kFull,
+    kFull = 2,
+    // Range is defined by MatrixCoefficients/TransferCharacteristics.
+    kDerived = 3,
+    // When adding/removing entries here, please make sure to do the
+    // corresponding change to kRangeIds.
   };
 
   ColorSpace();
+  ColorSpace(const ColorSpace& other);
+  ColorSpace(ColorSpace&& other);
+  ColorSpace& operator=(const ColorSpace& other);
   ColorSpace(PrimaryID primaries,
              TransferID transfer,
              MatrixID matrix,
              RangeID full_range);
+  ColorSpace(PrimaryID primaries,
+             TransferID transfer,
+             MatrixID matrix,
+             RangeID range,
+             const HdrMetadata* hdr_metadata);
+  bool operator==(const ColorSpace& other) const {
+    return primaries_ == other.primaries() && transfer_ == other.transfer() &&
+           matrix_ == other.matrix() && range_ == other.range() &&
+           ((hdr_metadata_.has_value() && other.hdr_metadata() &&
+             *hdr_metadata_ == *other.hdr_metadata()) ||
+            (!hdr_metadata_.has_value() && other.hdr_metadata() == nullptr));
+  }
 
   PrimaryID primaries() const;
   TransferID transfer() const;
   MatrixID matrix() const;
   RangeID range() const;
+  const HdrMetadata* hdr_metadata() const;
+
+  bool set_primaries_from_uint8(uint8_t enum_value);
+  bool set_transfer_from_uint8(uint8_t enum_value);
+  bool set_matrix_from_uint8(uint8_t enum_value);
+  bool set_range_from_uint8(uint8_t enum_value);
+  void set_hdr_metadata(const HdrMetadata* hdr_metadata);
 
  private:
   PrimaryID primaries_ = PrimaryID::kInvalid;
   TransferID transfer_ = TransferID::kInvalid;
   MatrixID matrix_ = MatrixID::kInvalid;
   RangeID range_ = RangeID::kInvalid;
+  absl::optional<HdrMetadata> hdr_metadata_;
 };
 
 }  // namespace webrtc
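
A usage sketch for the extended ColorSpace surface (illustrative, not part of the patch; it relies only on the constructors, accessors and *_from_uint8 setters declared above):

```cpp
#include <cassert>

#include "api/video/color_space.h"
#include "api/video/hdr_metadata.h"

int main() {
  using webrtc::ColorSpace;
  webrtc::HdrMetadata hdr;  // default-constructed mastering metadata
  const ColorSpace bt709(ColorSpace::PrimaryID::kBT709,
                         ColorSpace::TransferID::kBT709,
                         ColorSpace::MatrixID::kBT709,
                         ColorSpace::RangeID::kLimited, &hdr);
  assert(bt709.hdr_metadata() != nullptr);  // copied into the optional member

  // Parsing a value straight off the bitstream: 1 maps to kBT709 in
  // T-REC H.273 Table 2, while 3 is a gap and must be rejected.
  ColorSpace parsed;
  assert(parsed.set_primaries_from_uint8(1));
  assert(parsed.primaries() == ColorSpace::PrimaryID::kBT709);
  assert(!parsed.set_primaries_from_uint8(3));
  return 0;
}
```
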
diff --git a/api/video/encoded_image.h b/api/video/encoded_image.h
index 5c4a82d..a7c719c 100644
--- a/api/video/encoded_image.h
+++ b/api/video/encoded_image.h
@@ -14,7 +14,9 @@
 #include <stdint.h>
 
 #include "absl/types/optional.h"
+#include "api/video/color_space.h"
 #include "api/video/video_bitrate_allocation.h"
+#include "api/video/video_codec_type.h"
 #include "api/video/video_content_type.h"
 #include "api/video/video_rotation.h"
 #include "api/video/video_timing.h"
@@ -49,14 +51,20 @@
   void SetEncodeTime(int64_t encode_start_ms, int64_t encode_finish_ms);
 
   absl::optional<int> SpatialIndex() const {
-    if (spatial_index_ < 0)
-      return absl::nullopt;
     return spatial_index_;
   }
   void SetSpatialIndex(absl::optional<int> spatial_index) {
     RTC_DCHECK_GE(spatial_index.value_or(0), 0);
     RTC_DCHECK_LT(spatial_index.value_or(0), kMaxSpatialLayers);
-    spatial_index_ = spatial_index.value_or(-1);
+    spatial_index_ = spatial_index;
+  }
+
+  const webrtc::ColorSpace* ColorSpace() const {
+    return color_space_ ? &*color_space_ : nullptr;
+  }
+  void SetColorSpace(const webrtc::ColorSpace* color_space) {
+    color_space_ =
+        color_space ? absl::make_optional(*color_space) : absl::nullopt;
   }
 
   uint32_t _encodedWidth = 0;
@@ -92,9 +100,8 @@
 
  private:
   uint32_t timestamp_rtp_ = 0;
-  // -1 means not set. Use a plain int rather than optional, to keep this class
-  // copyable with memcpy.
-  int spatial_index_ = -1;
+  absl::optional<int> spatial_index_;
+  absl::optional<webrtc::ColorSpace> color_space_;
 };
 
 }  // namespace webrtc
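
An illustrative sketch of the new EncodedImage accessors (not part of the patch; it assumes EncodedImage's existing default constructor, which is not shown in this diff):

```cpp
#include <cassert>

#include "api/video/color_space.h"
#include "api/video/encoded_image.h"

int main() {
  webrtc::EncodedImage image;
  assert(!image.SpatialIndex().has_value());  // unset, no -1 sentinel anymore

  image.SetSpatialIndex(2);
  assert(image.SpatialIndex() == 2);

  const webrtc::ColorSpace bt709(webrtc::ColorSpace::PrimaryID::kBT709,
                                 webrtc::ColorSpace::TransferID::kBT709,
                                 webrtc::ColorSpace::MatrixID::kBT709,
                                 webrtc::ColorSpace::RangeID::kLimited);
  image.SetColorSpace(&bt709);   // copied into an absl::optional member
  assert(image.ColorSpace() != nullptr);
  image.SetColorSpace(nullptr);  // clears it again
  assert(image.ColorSpace() == nullptr);
  return 0;
}
```
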
diff --git a/api/video/hdr_metadata.cc b/api/video/hdr_metadata.cc
index bfe54ce..e2a669c 100644
--- a/api/video/hdr_metadata.cc
+++ b/api/video/hdr_metadata.cc
@@ -13,23 +13,9 @@
 namespace webrtc {
 
 HdrMasteringMetadata::Chromaticity::Chromaticity() = default;
-HdrMasteringMetadata::Chromaticity::Chromaticity(const Chromaticity& rhs) =
-    default;
-HdrMasteringMetadata::Chromaticity::Chromaticity(Chromaticity&& rhs) = default;
-HdrMasteringMetadata::Chromaticity& HdrMasteringMetadata::Chromaticity::
-operator=(const Chromaticity& rhs) = default;
 
 HdrMasteringMetadata::HdrMasteringMetadata() = default;
-HdrMasteringMetadata::HdrMasteringMetadata(const HdrMasteringMetadata& rhs) =
-    default;
-HdrMasteringMetadata::HdrMasteringMetadata(HdrMasteringMetadata&& rhs) =
-    default;
-HdrMasteringMetadata& HdrMasteringMetadata::operator=(
-    const HdrMasteringMetadata& rhs) = default;
 
 HdrMetadata::HdrMetadata() = default;
-HdrMetadata::HdrMetadata(const HdrMetadata& rhs) = default;
-HdrMetadata::HdrMetadata(HdrMetadata&& rhs) = default;
-HdrMetadata& HdrMetadata::operator=(const HdrMetadata& rhs) = default;
 
 }  // namespace webrtc
diff --git a/api/video/hdr_metadata.h b/api/video/hdr_metadata.h
index be0c173..676a900 100644
--- a/api/video/hdr_metadata.h
+++ b/api/video/hdr_metadata.h
@@ -30,9 +30,6 @@
     }
 
     Chromaticity();
-    Chromaticity(const Chromaticity& rhs);
-    Chromaticity(Chromaticity&& rhs);
-    Chromaticity& operator=(const Chromaticity& rhs);
   };
 
   // The nominal primaries of the mastering display.
@@ -54,9 +51,6 @@
   float luminance_min = 0.0f;
 
   HdrMasteringMetadata();
-  HdrMasteringMetadata(const HdrMasteringMetadata& rhs);
-  HdrMasteringMetadata(HdrMasteringMetadata&& rhs);
-  HdrMasteringMetadata& operator=(const HdrMasteringMetadata& rhs);
 
   bool operator==(const HdrMasteringMetadata& rhs) const {
     return ((primary_r == rhs.primary_r) && (primary_g == rhs.primary_g) &&
@@ -79,9 +73,6 @@
   uint32_t max_frame_average_light_level = 0;
 
   HdrMetadata();
-  HdrMetadata(const HdrMetadata& rhs);
-  HdrMetadata(HdrMetadata&& rhs);
-  HdrMetadata& operator=(const HdrMetadata& rhs);
 
   bool operator==(const HdrMetadata& rhs) const {
     return (
diff --git a/api/video/video_codec_type.h b/api/video/video_codec_type.h
new file mode 100644
index 0000000..447723c
--- /dev/null
+++ b/api/video/video_codec_type.h
@@ -0,0 +1,30 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef API_VIDEO_VIDEO_CODEC_TYPE_H_
+#define API_VIDEO_VIDEO_CODEC_TYPE_H_
+
+namespace webrtc {
+
+// Video codec types
+enum VideoCodecType {
+  // There are various memset(..., 0, ...) calls in the code that rely on
+  // kVideoCodecGeneric being zero.
+  kVideoCodecGeneric = 0,
+  kVideoCodecVP8,
+  kVideoCodecVP9,
+  kVideoCodecH264,
+  kVideoCodecI420,
+  kVideoCodecMultiplex,
+};
+
+}  // namespace webrtc
+
+#endif  // API_VIDEO_VIDEO_CODEC_TYPE_H_
diff --git a/api/video/video_frame.cc b/api/video/video_frame.cc
index 12da43f..eaae33b 100644
--- a/api/video/video_frame.cc
+++ b/api/video/video_frame.cc
@@ -21,7 +21,7 @@
 
 VideoFrame VideoFrame::Builder::build() {
   return VideoFrame(video_frame_buffer_, timestamp_us_, timestamp_rtp_,
-                    ntp_time_ms_, rotation_, color_space_, hdr_metadata_);
+                    ntp_time_ms_, rotation_, color_space_);
 }
 
 VideoFrame::Builder& VideoFrame::Builder::set_video_frame_buffer(
@@ -64,9 +64,10 @@
   return *this;
 }
 
-VideoFrame::Builder& VideoFrame::Builder::set_hdr_metadata(
-    const HdrMetadata& hdr_metadata) {
-  hdr_metadata_ = hdr_metadata;
+VideoFrame::Builder& VideoFrame::Builder::set_color_space(
+    const ColorSpace* color_space) {
+  color_space_ =
+      color_space ? absl::make_optional(*color_space) : absl::nullopt;
   return *this;
 }
 
@@ -96,15 +97,13 @@
                        uint32_t timestamp_rtp,
                        int64_t ntp_time_ms,
                        VideoRotation rotation,
-                       const absl::optional<ColorSpace>& color_space,
-                       const absl::optional<HdrMetadata>& hdr_metadata)
+                       const absl::optional<ColorSpace>& color_space)
     : video_frame_buffer_(buffer),
       timestamp_rtp_(timestamp_rtp),
       ntp_time_ms_(ntp_time_ms),
       timestamp_us_(timestamp_us),
       rotation_(rotation),
-      color_space_(color_space),
-      hdr_metadata_(hdr_metadata) {}
+      color_space_(color_space) {}
 
 VideoFrame::~VideoFrame() = default;
 
diff --git a/api/video/video_frame.h b/api/video/video_frame.h
index 58362b0..2c5d081 100644
--- a/api/video/video_frame.h
+++ b/api/video/video_frame.h
@@ -40,7 +40,7 @@
     Builder& set_ntp_time_ms(int64_t ntp_time_ms);
     Builder& set_rotation(VideoRotation rotation);
     Builder& set_color_space(const ColorSpace& color_space);
-    Builder& set_hdr_metadata(const HdrMetadata& hdr_metadata);
+    Builder& set_color_space(const ColorSpace* color_space);
 
    private:
     rtc::scoped_refptr<webrtc::VideoFrameBuffer> video_frame_buffer_;
@@ -49,7 +49,6 @@
     int64_t ntp_time_ms_ = 0;
     VideoRotation rotation_ = kVideoRotation_0;
     absl::optional<ColorSpace> color_space_;
-    absl::optional<HdrMetadata> hdr_metadata_;
   };
 
   // To be deprecated. Migrate all use to Builder.
@@ -116,10 +115,9 @@
   void set_rotation(VideoRotation rotation) { rotation_ = rotation; }
 
   // Get color space when available.
-  absl::optional<ColorSpace> color_space() const { return color_space_; }
-
-  // Get HDR metadata when available.
-  absl::optional<HdrMetadata> hdr_metadata() const { return hdr_metadata_; }
+  const ColorSpace* color_space() const {
+    return color_space_ ? &*color_space_ : nullptr;
+  }
 
   // Get render time in milliseconds.
   // TODO(nisse): Deprecated. Migrate all users to timestamp_us().
@@ -141,8 +139,7 @@
              uint32_t timestamp_rtp,
              int64_t ntp_time_ms,
              VideoRotation rotation,
-             const absl::optional<ColorSpace>& color_space,
-             const absl::optional<HdrMetadata>& hdr_metadata);
+             const absl::optional<ColorSpace>& color_space);
 
   // An opaque reference counted handle that stores the pixel data.
   rtc::scoped_refptr<webrtc::VideoFrameBuffer> video_frame_buffer_;
@@ -151,7 +148,6 @@
   int64_t timestamp_us_;
   VideoRotation rotation_;
   absl::optional<ColorSpace> color_space_;
-  absl::optional<HdrMetadata> hdr_metadata_;
 };
 
 }  // namespace webrtc
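
A sketch of the pointer-based Builder::set_color_space (illustrative only; I420Buffer::Create and the untouched Builder setters are assumptions about the surrounding tree, not part of this diff):

```cpp
#include <cassert>

#include "api/video/color_space.h"
#include "api/video/i420_buffer.h"
#include "api/video/video_frame.h"

int main() {
  const webrtc::ColorSpace bt709(webrtc::ColorSpace::PrimaryID::kBT709,
                                 webrtc::ColorSpace::TransferID::kBT709,
                                 webrtc::ColorSpace::MatrixID::kBT709,
                                 webrtc::ColorSpace::RangeID::kLimited);
  webrtc::VideoFrame frame =
      webrtc::VideoFrame::Builder()
          .set_video_frame_buffer(webrtc::I420Buffer::Create(320, 240))
          .set_color_space(&bt709)  // pointer overload; nullptr means "not set"
          .build();
  // HDR metadata now travels inside ColorSpace instead of as a separate
  // optional on the frame.
  assert(frame.color_space() != nullptr);
  return 0;
}
```
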
diff --git a/audio/BUILD.gn b/audio/BUILD.gn
index 4b2ec61..c045af6 100644
--- a/audio/BUILD.gn
+++ b/audio/BUILD.gn
@@ -26,19 +26,13 @@
     "audio_transport_impl.h",
     "channel_receive.cc",
     "channel_receive.h",
-    "channel_receive_proxy.cc",
-    "channel_receive_proxy.h",
     "channel_send.cc",
     "channel_send.h",
-    "channel_send_proxy.cc",
-    "channel_send_proxy.h",
     "conversion.h",
     "null_audio_poller.cc",
     "null_audio_poller.h",
     "remix_resample.cc",
     "remix_resample.h",
-    "time_interval.cc",
-    "time_interval.h",
     "transport_feedback_packet_loss_tracker.cc",
     "transport_feedback_packet_loss_tracker.h",
   ]
@@ -49,7 +43,6 @@
   }
 
   deps = [
-    "..:webrtc_common",
     "../api:array_view",
     "../api:call_api",
     "../api:libjingle_peerconnection_api",
@@ -131,7 +124,6 @@
       "remix_resample_unittest.cc",
       "test/audio_stats_test.cc",
       "test/media_transport_test.cc",
-      "time_interval_unittest.cc",
       "transport_feedback_packet_loss_tracker_unittest.cc",
     ]
     deps = [
@@ -155,6 +147,7 @@
       "../logging:mocks",
       "../logging:rtc_event_log_api",
       "../modules/audio_device:mock_audio_device",
+      "../rtc_base:rtc_base_tests_utils",
 
       # For TestAudioDeviceModule
       "../modules/audio_device:audio_device_impl",
@@ -168,7 +161,6 @@
       "../modules/utility",
       "../rtc_base:checks",
       "../rtc_base:rtc_base_approved",
-      "../rtc_base:rtc_base_tests_utils",
       "../rtc_base:rtc_task_queue",
       "../rtc_base:safe_compare",
       "../system_wrappers:system_wrappers",
diff --git a/audio/audio_receive_stream.cc b/audio/audio_receive_stream.cc
index 4f2e29c..8d4afe0 100644
--- a/audio/audio_receive_stream.cc
+++ b/audio/audio_receive_stream.cc
@@ -21,11 +21,9 @@
 #include "audio/audio_send_stream.h"
 #include "audio/audio_state.h"
 #include "audio/channel_receive.h"
-#include "audio/channel_receive_proxy.h"
 #include "audio/conversion.h"
 #include "call/rtp_config.h"
 #include "call/rtp_stream_receiver_controller_interface.h"
-#include "common_types.h"  // NOLINT(build/include)
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
 #include "rtc_base/strings/string_builder.h"
@@ -68,7 +66,7 @@
 
 namespace internal {
 namespace {
-std::unique_ptr<voe::ChannelReceiveProxy> CreateChannelAndProxy(
+std::unique_ptr<voe::ChannelReceiveInterface> CreateChannelReceive(
     webrtc::AudioState* audio_state,
     ProcessThread* module_process_thread,
     const webrtc::AudioReceiveStream::Config& config,
@@ -76,13 +74,13 @@
   RTC_DCHECK(audio_state);
   internal::AudioState* internal_audio_state =
       static_cast<internal::AudioState*>(audio_state);
-  return absl::make_unique<voe::ChannelReceiveProxy>(
-      absl::make_unique<voe::ChannelReceive>(
-          module_process_thread, internal_audio_state->audio_device_module(),
-          config.media_transport, config.rtcp_send_transport, event_log,
-          config.rtp.remote_ssrc, config.jitter_buffer_max_packets,
-          config.jitter_buffer_fast_accelerate, config.decoder_factory,
-          config.codec_pair_id, config.frame_decryptor, config.crypto_options));
+  return voe::CreateChannelReceive(
+      module_process_thread, internal_audio_state->audio_device_module(),
+      config.media_transport, config.rtcp_send_transport, event_log,
+      config.rtp.remote_ssrc, config.jitter_buffer_max_packets,
+      config.jitter_buffer_fast_accelerate, config.jitter_buffer_min_delay_ms,
+      config.decoder_factory, config.codec_pair_id, config.frame_decryptor,
+      config.crypto_options);
 }
 }  // namespace
 
@@ -98,10 +96,10 @@
                          config,
                          audio_state,
                          event_log,
-                         CreateChannelAndProxy(audio_state.get(),
-                                               module_process_thread,
-                                               config,
-                                               event_log)) {}
+                         CreateChannelReceive(audio_state.get(),
+                                              module_process_thread,
+                                              config,
+                                              event_log)) {}
 
 AudioReceiveStream::AudioReceiveStream(
     RtpStreamReceiverControllerInterface* receiver_controller,
@@ -109,13 +107,13 @@
     const webrtc::AudioReceiveStream::Config& config,
     const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
     webrtc::RtcEventLog* event_log,
-    std::unique_ptr<voe::ChannelReceiveProxy> channel_proxy)
-    : audio_state_(audio_state), channel_proxy_(std::move(channel_proxy)) {
+    std::unique_ptr<voe::ChannelReceiveInterface> channel_receive)
+    : audio_state_(audio_state), channel_receive_(std::move(channel_receive)) {
   RTC_LOG(LS_INFO) << "AudioReceiveStream: " << config.rtp.remote_ssrc;
   RTC_DCHECK(config.decoder_factory);
   RTC_DCHECK(config.rtcp_send_transport);
   RTC_DCHECK(audio_state_);
-  RTC_DCHECK(channel_proxy_);
+  RTC_DCHECK(channel_receive_);
 
   module_process_thread_checker_.DetachFromThread();
 
@@ -123,11 +121,11 @@
     RTC_DCHECK(receiver_controller);
     RTC_DCHECK(packet_router);
     // Configure bandwidth estimation.
-    channel_proxy_->RegisterReceiverCongestionControlObjects(packet_router);
+    channel_receive_->RegisterReceiverCongestionControlObjects(packet_router);
 
     // Register with transport.
     rtp_stream_receiver_ = receiver_controller->CreateReceiver(
-        config.rtp.remote_ssrc, channel_proxy_.get());
+        config.rtp.remote_ssrc, channel_receive_.get());
   }
   ConfigureStream(this, config, true);
 }
@@ -136,9 +134,9 @@
   RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   RTC_LOG(LS_INFO) << "~AudioReceiveStream: " << config_.rtp.remote_ssrc;
   Stop();
-  channel_proxy_->DisassociateSendChannel();
+  channel_receive_->SetAssociatedSendChannel(nullptr);
   if (!config_.media_transport) {
-    channel_proxy_->ResetReceiverCongestionControlObjects();
+    channel_receive_->ResetReceiverCongestionControlObjects();
   }
 }
 
@@ -153,7 +151,7 @@
   if (playing_) {
     return;
   }
-  channel_proxy_->StartPlayout();
+  channel_receive_->StartPlayout();
   playing_ = true;
   audio_state()->AddReceivingStream(this);
 }
@@ -163,7 +161,7 @@
   if (!playing_) {
     return;
   }
-  channel_proxy_->StopPlayout();
+  channel_receive_->StopPlayout();
   playing_ = false;
   audio_state()->RemoveReceivingStream(this);
 }
@@ -174,11 +172,11 @@
   stats.remote_ssrc = config_.rtp.remote_ssrc;
 
   webrtc::CallReceiveStatistics call_stats =
-      channel_proxy_->GetRTCPStatistics();
+      channel_receive_->GetRTCPStatistics();
   // TODO(solenberg): Don't return here if we can't get the codec - return the
   //                  stats we *can* get.
   webrtc::CodecInst codec_inst = {0};
-  if (!channel_proxy_->GetRecCodec(&codec_inst)) {
+  if (!channel_receive_->GetRecCodec(&codec_inst)) {
     return stats;
   }
 
@@ -195,13 +193,13 @@
   if (codec_inst.plfreq / 1000 > 0) {
     stats.jitter_ms = call_stats.jitterSamples / (codec_inst.plfreq / 1000);
   }
-  stats.delay_estimate_ms = channel_proxy_->GetDelayEstimate();
-  stats.audio_level = channel_proxy_->GetSpeechOutputLevelFullRange();
-  stats.total_output_energy = channel_proxy_->GetTotalOutputEnergy();
-  stats.total_output_duration = channel_proxy_->GetTotalOutputDuration();
+  stats.delay_estimate_ms = channel_receive_->GetDelayEstimate();
+  stats.audio_level = channel_receive_->GetSpeechOutputLevelFullRange();
+  stats.total_output_energy = channel_receive_->GetTotalOutputEnergy();
+  stats.total_output_duration = channel_receive_->GetTotalOutputDuration();
 
   // Get jitter buffer and total delay (alg + jitter + playout) stats.
-  auto ns = channel_proxy_->GetNetworkStatistics();
+  auto ns = channel_receive_->GetNetworkStatistics();
   stats.jitter_buffer_ms = ns.currentBufferSize;
   stats.jitter_buffer_preferred_ms = ns.preferredBufferSize;
   stats.total_samples_received = ns.totalSamplesReceived;
@@ -216,8 +214,10 @@
   stats.secondary_discarded_rate = Q14ToFloat(ns.currentSecondaryDiscardedRate);
   stats.accelerate_rate = Q14ToFloat(ns.currentAccelerateRate);
   stats.preemptive_expand_rate = Q14ToFloat(ns.currentPreemptiveRate);
+  stats.jitter_buffer_flushes = ns.packetBufferFlushes;
+  stats.delayed_packet_outage_samples = ns.delayedPacketOutageSamples;
 
-  auto ds = channel_proxy_->GetDecodingCallStatistics();
+  auto ds = channel_receive_->GetDecodingCallStatistics();
   stats.decoding_calls_to_silence_generator = ds.calls_to_silence_generator;
   stats.decoding_calls_to_neteq = ds.calls_to_neteq;
   stats.decoding_normal = ds.decoded_normal;
@@ -231,23 +231,23 @@
 
 void AudioReceiveStream::SetSink(AudioSinkInterface* sink) {
   RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-  channel_proxy_->SetSink(sink);
+  channel_receive_->SetSink(sink);
 }
 
 void AudioReceiveStream::SetGain(float gain) {
   RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-  channel_proxy_->SetChannelOutputVolumeScaling(gain);
+  channel_receive_->SetChannelOutputVolumeScaling(gain);
 }
 
 std::vector<RtpSource> AudioReceiveStream::GetSources() const {
   RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-  return channel_proxy_->GetSources();
+  return channel_receive_->GetSources();
 }
 
 AudioMixer::Source::AudioFrameInfo AudioReceiveStream::GetAudioFrameWithInfo(
     int sample_rate_hz,
     AudioFrame* audio_frame) {
-  return channel_proxy_->GetAudioFrameWithInfo(sample_rate_hz, audio_frame);
+  return channel_receive_->GetAudioFrameWithInfo(sample_rate_hz, audio_frame);
 }
 
 int AudioReceiveStream::Ssrc() const {
@@ -255,7 +255,7 @@
 }
 
 int AudioReceiveStream::PreferredSampleRate() const {
-  return channel_proxy_->PreferredSampleRate();
+  return channel_receive_->PreferredSampleRate();
 }
 
 int AudioReceiveStream::id() const {
@@ -265,32 +265,29 @@
 
 absl::optional<Syncable::Info> AudioReceiveStream::GetInfo() const {
   RTC_DCHECK_RUN_ON(&module_process_thread_checker_);
-  absl::optional<Syncable::Info> info = channel_proxy_->GetSyncInfo();
+  absl::optional<Syncable::Info> info = channel_receive_->GetSyncInfo();
 
   if (!info)
     return absl::nullopt;
 
-  info->current_delay_ms = channel_proxy_->GetDelayEstimate();
+  info->current_delay_ms = channel_receive_->GetDelayEstimate();
   return info;
 }
 
 uint32_t AudioReceiveStream::GetPlayoutTimestamp() const {
   // Called on video capture thread.
-  return channel_proxy_->GetPlayoutTimestamp();
+  return channel_receive_->GetPlayoutTimestamp();
 }
 
 void AudioReceiveStream::SetMinimumPlayoutDelay(int delay_ms) {
   RTC_DCHECK_RUN_ON(&module_process_thread_checker_);
-  return channel_proxy_->SetMinimumPlayoutDelay(delay_ms);
+  return channel_receive_->SetMinimumPlayoutDelay(delay_ms);
 }
 
 void AudioReceiveStream::AssociateSendStream(AudioSendStream* send_stream) {
   RTC_DCHECK_RUN_ON(&worker_thread_checker_);
-  if (send_stream) {
-    channel_proxy_->AssociateSendChannel(send_stream->GetChannelProxy());
-  } else {
-    channel_proxy_->DisassociateSendChannel();
-  }
+  channel_receive_->SetAssociatedSendChannel(
+      send_stream ? send_stream->GetChannel() : nullptr);
   associated_send_stream_ = send_stream;
 }
 
@@ -303,7 +300,7 @@
   // calls on the worker thread. We should move towards always using a network
   // thread. Then this check can be enabled.
   // RTC_DCHECK(!thread_checker_.CalledOnValidThread());
-  return channel_proxy_->ReceivedRTCPPacket(packet, length);
+  return channel_receive_->ReceivedRTCPPacket(packet, length);
 }
 
 void AudioReceiveStream::OnRtpPacket(const RtpPacketReceived& packet) {
@@ -311,7 +308,7 @@
   // calls on the worker thread. We should move towards always using a network
   // thread. Then this check can be enabled.
   // RTC_DCHECK(!thread_checker_.CalledOnValidThread());
-  channel_proxy_->OnRtpPacket(packet);
+  channel_receive_->OnRtpPacket(packet);
 }
 
 const webrtc::AudioReceiveStream::Config& AudioReceiveStream::config() const {
@@ -337,7 +334,7 @@
   RTC_LOG(LS_INFO) << "AudioReceiveStream::ConfigureStream: "
                    << new_config.ToString();
   RTC_DCHECK(stream);
-  const auto& channel_proxy = stream->channel_proxy_;
+  const auto& channel_receive = stream->channel_receive_;
   const auto& old_config = stream->config_;
 
   // Configuration parameters which cannot be changed.
@@ -351,7 +348,7 @@
              old_config.decoder_factory == new_config.decoder_factory);
 
   if (first_time || old_config.rtp.local_ssrc != new_config.rtp.local_ssrc) {
-    channel_proxy->SetLocalSSRC(new_config.rtp.local_ssrc);
+    channel_receive->SetLocalSSRC(new_config.rtp.local_ssrc);
   }
 
   if (!first_time) {
@@ -363,11 +360,11 @@
   // using the actual packet size for the configured codec.
   if (first_time || old_config.rtp.nack.rtp_history_ms !=
                         new_config.rtp.nack.rtp_history_ms) {
-    channel_proxy->SetNACKStatus(new_config.rtp.nack.rtp_history_ms != 0,
-                                 new_config.rtp.nack.rtp_history_ms / 20);
+    channel_receive->SetNACKStatus(new_config.rtp.nack.rtp_history_ms != 0,
+                                   new_config.rtp.nack.rtp_history_ms / 20);
   }
   if (first_time || old_config.decoder_map != new_config.decoder_map) {
-    channel_proxy->SetReceiveCodecs(new_config.decoder_map);
+    channel_receive->SetReceiveCodecs(new_config.decoder_map);
   }
 
   stream->config_ = new_config;
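
For reference, the NACK window handed to SetNACKStatus above is configured in milliseconds but forwarded as a packet count, assuming roughly one packet per 20 ms (the TODO above notes this should eventually use the real packet size). A minimal sketch of that mapping; the helper name is illustrative and not part of the WebRTC API:

    #include <utility>

    // Illustrative helper mirroring the conversion above: a history window in
    // milliseconds becomes an (enabled, max_packets) pair, assuming ~20 ms packets.
    std::pair<bool, int> NackConfigFromHistoryMs(int rtp_history_ms) {
      const bool enabled = rtp_history_ms != 0;
      const int max_packets = rtp_history_ms / 20;  // e.g. 300 ms -> 15 packets.
      return {enabled, max_packets};
    }

With a 300 ms history this yields (true, 15), which matches the SetNACKStatus(true, 15) expectation in the receive-stream unit test below.
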
diff --git a/audio/audio_receive_stream.h b/audio/audio_receive_stream.h
index dde0da4..86bcb1c 100644
--- a/audio/audio_receive_stream.h
+++ b/audio/audio_receive_stream.h
@@ -31,7 +31,7 @@
 class RtpStreamReceiverInterface;
 
 namespace voe {
-class ChannelReceiveProxy;
+class ChannelReceiveInterface;
 }  // namespace voe
 
 namespace internal {
@@ -47,13 +47,14 @@
                      const webrtc::AudioReceiveStream::Config& config,
                      const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
                      webrtc::RtcEventLog* event_log);
-  // For unit tests, which need to supply a mock channel proxy.
-  AudioReceiveStream(RtpStreamReceiverControllerInterface* receiver_controller,
-                     PacketRouter* packet_router,
-                     const webrtc::AudioReceiveStream::Config& config,
-                     const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
-                     webrtc::RtcEventLog* event_log,
-                     std::unique_ptr<voe::ChannelReceiveProxy> channel_proxy);
+  // For unit tests, which need to supply a mock channel receive.
+  AudioReceiveStream(
+      RtpStreamReceiverControllerInterface* receiver_controller,
+      PacketRouter* packet_router,
+      const webrtc::AudioReceiveStream::Config& config,
+      const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
+      webrtc::RtcEventLog* event_log,
+      std::unique_ptr<voe::ChannelReceiveInterface> channel_receive);
   ~AudioReceiveStream() override;
 
   // webrtc::AudioReceiveStream implementation.
@@ -100,7 +101,7 @@
   rtc::ThreadChecker module_process_thread_checker_;
   webrtc::AudioReceiveStream::Config config_;
   rtc::scoped_refptr<webrtc::AudioState> audio_state_;
-  std::unique_ptr<voe::ChannelReceiveProxy> channel_proxy_;
+  const std::unique_ptr<voe::ChannelReceiveInterface> channel_receive_;
   AudioSendStream* associated_send_stream_ = nullptr;
 
   bool playing_ RTC_GUARDED_BY(worker_thread_checker_) = false;
diff --git a/audio/audio_receive_stream_unittest.cc b/audio/audio_receive_stream_unittest.cc
index a5c7e20..7422810 100644
--- a/audio/audio_receive_stream_unittest.cc
+++ b/audio/audio_receive_stream_unittest.cc
@@ -83,16 +83,16 @@
         new rtc::RefCountedObject<testing::NiceMock<MockAudioDeviceModule>>();
     audio_state_ = AudioState::Create(config);
 
-    channel_proxy_ = new testing::StrictMock<MockChannelReceiveProxy>();
-    EXPECT_CALL(*channel_proxy_, SetLocalSSRC(kLocalSsrc)).Times(1);
-    EXPECT_CALL(*channel_proxy_, SetNACKStatus(true, 15)).Times(1);
-    EXPECT_CALL(*channel_proxy_,
+    channel_receive_ = new testing::StrictMock<MockChannelReceive>();
+    EXPECT_CALL(*channel_receive_, SetLocalSSRC(kLocalSsrc)).Times(1);
+    EXPECT_CALL(*channel_receive_, SetNACKStatus(true, 15)).Times(1);
+    EXPECT_CALL(*channel_receive_,
                 RegisterReceiverCongestionControlObjects(&packet_router_))
         .Times(1);
-    EXPECT_CALL(*channel_proxy_, ResetReceiverCongestionControlObjects())
+    EXPECT_CALL(*channel_receive_, ResetReceiverCongestionControlObjects())
         .Times(1);
-    EXPECT_CALL(*channel_proxy_, DisassociateSendChannel()).Times(1);
-    EXPECT_CALL(*channel_proxy_, SetReceiveCodecs(_))
+    EXPECT_CALL(*channel_receive_, SetAssociatedSendChannel(nullptr)).Times(1);
+    EXPECT_CALL(*channel_receive_, SetReceiveCodecs(_))
         .WillRepeatedly(Invoke([](const std::map<int, SdpAudioFormat>& codecs) {
           EXPECT_THAT(codecs, testing::IsEmpty());
         }));
@@ -114,33 +114,33 @@
         new internal::AudioReceiveStream(
             &rtp_stream_receiver_controller_, &packet_router_, stream_config_,
             audio_state_, &event_log_,
-            std::unique_ptr<voe::ChannelReceiveProxy>(channel_proxy_)));
+            std::unique_ptr<voe::ChannelReceiveInterface>(channel_receive_)));
   }
 
   AudioReceiveStream::Config& config() { return stream_config_; }
   rtc::scoped_refptr<MockAudioMixer> audio_mixer() { return audio_mixer_; }
-  MockChannelReceiveProxy* channel_proxy() { return channel_proxy_; }
+  MockChannelReceive* channel_receive() { return channel_receive_; }
 
   void SetupMockForGetStats() {
     using testing::DoAll;
     using testing::SetArgPointee;
 
-    ASSERT_TRUE(channel_proxy_);
-    EXPECT_CALL(*channel_proxy_, GetRTCPStatistics())
+    ASSERT_TRUE(channel_receive_);
+    EXPECT_CALL(*channel_receive_, GetRTCPStatistics())
         .WillOnce(Return(kCallStats));
-    EXPECT_CALL(*channel_proxy_, GetDelayEstimate())
+    EXPECT_CALL(*channel_receive_, GetDelayEstimate())
         .WillOnce(Return(kJitterBufferDelay + kPlayoutBufferDelay));
-    EXPECT_CALL(*channel_proxy_, GetSpeechOutputLevelFullRange())
+    EXPECT_CALL(*channel_receive_, GetSpeechOutputLevelFullRange())
         .WillOnce(Return(kSpeechOutputLevel));
-    EXPECT_CALL(*channel_proxy_, GetTotalOutputEnergy())
+    EXPECT_CALL(*channel_receive_, GetTotalOutputEnergy())
         .WillOnce(Return(kTotalOutputEnergy));
-    EXPECT_CALL(*channel_proxy_, GetTotalOutputDuration())
+    EXPECT_CALL(*channel_receive_, GetTotalOutputDuration())
         .WillOnce(Return(kTotalOutputDuration));
-    EXPECT_CALL(*channel_proxy_, GetNetworkStatistics())
+    EXPECT_CALL(*channel_receive_, GetNetworkStatistics())
         .WillOnce(Return(kNetworkStats));
-    EXPECT_CALL(*channel_proxy_, GetDecodingCallStatistics())
+    EXPECT_CALL(*channel_receive_, GetDecodingCallStatistics())
         .WillOnce(Return(kAudioDecodeStats));
-    EXPECT_CALL(*channel_proxy_, GetRecCodec(_))
+    EXPECT_CALL(*channel_receive_, GetRecCodec(_))
         .WillOnce(DoAll(SetArgPointee<0>(kCodecInst), Return(true)));
   }
 
@@ -150,7 +150,7 @@
   rtc::scoped_refptr<AudioState> audio_state_;
   rtc::scoped_refptr<MockAudioMixer> audio_mixer_;
   AudioReceiveStream::Config stream_config_;
-  testing::StrictMock<MockChannelReceiveProxy>* channel_proxy_ = nullptr;
+  testing::StrictMock<MockChannelReceive>* channel_receive_ = nullptr;
   RtpStreamReceiverController rtp_stream_receiver_controller_;
   MockTransport rtcp_send_transport_;
 };
@@ -239,7 +239,7 @@
   ASSERT_TRUE(parsed_packet.Parse(&rtp_packet[0], rtp_packet.size()));
   parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000);
 
-  EXPECT_CALL(*helper.channel_proxy(),
+  EXPECT_CALL(*helper.channel_receive(),
               OnRtpPacket(testing::Ref(parsed_packet)));
 
   recv_stream->OnRtpPacket(parsed_packet);
@@ -250,7 +250,7 @@
   helper.config().rtp.transport_cc = true;
   auto recv_stream = helper.CreateAudioReceiveStream();
   std::vector<uint8_t> rtcp_packet = CreateRtcpSenderReport();
-  EXPECT_CALL(*helper.channel_proxy(),
+  EXPECT_CALL(*helper.channel_receive(),
               ReceivedRTCPPacket(&rtcp_packet[0], rtcp_packet.size()))
       .WillOnce(Return(true));
   EXPECT_TRUE(recv_stream->DeliverRtcp(&rtcp_packet[0], rtcp_packet.size()));
@@ -312,7 +312,7 @@
 TEST(AudioReceiveStreamTest, SetGain) {
   ConfigHelper helper;
   auto recv_stream = helper.CreateAudioReceiveStream();
-  EXPECT_CALL(*helper.channel_proxy(),
+  EXPECT_CALL(*helper.channel_receive(),
               SetChannelOutputVolumeScaling(FloatEq(0.765f)));
   recv_stream->SetGain(0.765f);
 }
@@ -323,10 +323,10 @@
   auto recv_stream1 = helper1.CreateAudioReceiveStream();
   auto recv_stream2 = helper2.CreateAudioReceiveStream();
 
-  EXPECT_CALL(*helper1.channel_proxy(), StartPlayout()).Times(1);
-  EXPECT_CALL(*helper2.channel_proxy(), StartPlayout()).Times(1);
-  EXPECT_CALL(*helper1.channel_proxy(), StopPlayout()).Times(1);
-  EXPECT_CALL(*helper2.channel_proxy(), StopPlayout()).Times(1);
+  EXPECT_CALL(*helper1.channel_receive(), StartPlayout()).Times(1);
+  EXPECT_CALL(*helper2.channel_receive(), StartPlayout()).Times(1);
+  EXPECT_CALL(*helper1.channel_receive(), StopPlayout()).Times(1);
+  EXPECT_CALL(*helper2.channel_receive(), StopPlayout()).Times(1);
   EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream1.get()))
       .WillOnce(Return(true));
   EXPECT_CALL(*helper1.audio_mixer(), AddSource(recv_stream2.get()))
@@ -367,10 +367,10 @@
                    kTransportSequenceNumberId + 1));
   new_config.decoder_map.emplace(1, SdpAudioFormat("foo", 8000, 1));
 
-  MockChannelReceiveProxy& channel_proxy = *helper.channel_proxy();
-  EXPECT_CALL(channel_proxy, SetLocalSSRC(kLocalSsrc + 1)).Times(1);
-  EXPECT_CALL(channel_proxy, SetNACKStatus(true, 15 + 1)).Times(1);
-  EXPECT_CALL(channel_proxy, SetReceiveCodecs(new_config.decoder_map));
+  MockChannelReceive& channel_receive = *helper.channel_receive();
+  EXPECT_CALL(channel_receive, SetLocalSSRC(kLocalSsrc + 1)).Times(1);
+  EXPECT_CALL(channel_receive, SetNACKStatus(true, 15 + 1)).Times(1);
+  EXPECT_CALL(channel_receive, SetReceiveCodecs(new_config.decoder_map));
 
   recv_stream->Reconfigure(new_config);
 }
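
The association change exercised above replaces the old Associate/Disassociate pair with a single setter that accepts nullptr. A minimal sketch of that pattern, assuming simplified stand-in types (SendChannel, SendStream, ReceiveChannel are illustrative, not the real voe classes):

    // Simplified stand-ins; the real interfaces live in audio/channel_send.h and
    // audio/channel_receive.h.
    class SendChannel {};

    class SendStream {
     public:
      const SendChannel* GetChannel() const { return &channel_; }

     private:
      SendChannel channel_;
    };

    class ReceiveChannel {
     public:
      // Passing nullptr clears any previous association, which is what the old
      // DisassociateSendChannel() call used to do.
      void SetAssociatedSendChannel(const SendChannel* channel) {
        associated_send_channel_ = channel;
      }

     private:
      const SendChannel* associated_send_channel_ = nullptr;
    };

    void Associate(ReceiveChannel& receive, SendStream* send_stream) {
      receive.SetAssociatedSendChannel(send_stream ? send_stream->GetChannel()
                                                   : nullptr);
    }
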
diff --git a/audio/audio_send_stream.cc b/audio/audio_send_stream.cc
index 37f89c5..75e6efb 100644
--- a/audio/audio_send_stream.cc
+++ b/audio/audio_send_stream.cc
@@ -22,12 +22,10 @@
 #include "api/crypto/frameencryptorinterface.h"
 #include "audio/audio_state.h"
 #include "audio/channel_send.h"
-#include "audio/channel_send_proxy.h"
 #include "audio/conversion.h"
 #include "call/rtp_config.h"
 #include "call/rtp_transport_controller_send_interface.h"
 #include "common_audio/vad/include/vad.h"
-#include "common_types.h"  // NOLINT(build/include)
 #include "logging/rtc_event_log/events/rtc_event_audio_send_stream_config.h"
 #include "logging/rtc_event_log/rtc_event_log.h"
 #include "logging/rtc_event_log/rtc_stream_config.h"
@@ -50,29 +48,14 @@
 constexpr size_t kPacketLossRateMinNumAckedPackets = 50;
 constexpr size_t kRecoverablePacketLossRateMinNumAckedPairs = 40;
 
-void CallEncoder(const std::unique_ptr<voe::ChannelSendProxy>& channel_proxy,
+void CallEncoder(const std::unique_ptr<voe::ChannelSendInterface>& channel_send,
                  rtc::FunctionView<void(AudioEncoder*)> lambda) {
-  channel_proxy->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder_ptr) {
+  channel_send->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder_ptr) {
     RTC_DCHECK(encoder_ptr);
     lambda(encoder_ptr->get());
   });
 }
 
-std::unique_ptr<voe::ChannelSendProxy> CreateChannelAndProxy(
-    rtc::TaskQueue* worker_queue,
-    ProcessThread* module_process_thread,
-    MediaTransportInterface* media_transport,
-    RtcpRttStats* rtcp_rtt_stats,
-    RtcEventLog* event_log,
-    FrameEncryptorInterface* frame_encryptor,
-    const webrtc::CryptoOptions& crypto_options,
-    bool extmap_allow_mixed) {
-  return absl::make_unique<voe::ChannelSendProxy>(
-      absl::make_unique<voe::ChannelSend>(
-          worker_queue, module_process_thread, media_transport, rtcp_rtt_stats,
-          event_log, frame_encryptor, crypto_options, extmap_allow_mixed));
-}
-
 void UpdateEventLogStreamConfig(RtcEventLog* event_log,
                                 const AudioSendStream::Config& config,
                                 const AudioSendStream::Config* old_config) {
@@ -107,29 +90,6 @@
 
 }  // namespace
 
-// Helper class to track the actively sending lifetime of this stream.
-class AudioSendStream::TimedTransport : public Transport {
- public:
-  TimedTransport(Transport* transport, TimeInterval* time_interval)
-      : transport_(transport), lifetime_(time_interval) {}
-  bool SendRtp(const uint8_t* packet,
-               size_t length,
-               const PacketOptions& options) {
-    if (lifetime_) {
-      lifetime_->Extend();
-    }
-    return transport_->SendRtp(packet, length, options);
-  }
-  bool SendRtcp(const uint8_t* packet, size_t length) {
-    return transport_->SendRtcp(packet, length);
-  }
-  ~TimedTransport() {}
-
- private:
-  Transport* transport_;
-  TimeInterval* lifetime_;
-};
-
 AudioSendStream::AudioSendStream(
     const webrtc::AudioSendStream::Config& config,
     const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
@@ -139,8 +99,7 @@
     BitrateAllocatorInterface* bitrate_allocator,
     RtcEventLog* event_log,
     RtcpRttStats* rtcp_rtt_stats,
-    const absl::optional<RtpState>& suspended_rtp_state,
-    TimeInterval* overall_call_lifetime)
+    const absl::optional<RtpState>& suspended_rtp_state)
     : AudioSendStream(config,
                       audio_state,
                       worker_queue,
@@ -149,15 +108,16 @@
                       event_log,
                       rtcp_rtt_stats,
                       suspended_rtp_state,
-                      overall_call_lifetime,
-                      CreateChannelAndProxy(worker_queue,
-                                            module_process_thread,
-                                            config.media_transport,
-                                            rtcp_rtt_stats,
-                                            event_log,
-                                            config.frame_encryptor,
-                                            config.crypto_options,
-                                            config.rtp.extmap_allow_mixed)) {}
+                      voe::CreateChannelSend(worker_queue,
+                                             module_process_thread,
+                                             config.media_transport,
+                                             config.send_transport,
+                                             rtcp_rtt_stats,
+                                             event_log,
+                                             config.frame_encryptor,
+                                             config.crypto_options,
+                                             config.rtp.extmap_allow_mixed,
+                                             config.rtcp_report_interval_ms)) {}
 
 AudioSendStream::AudioSendStream(
     const webrtc::AudioSendStream::Config& config,
@@ -168,13 +128,12 @@
     RtcEventLog* event_log,
     RtcpRttStats* rtcp_rtt_stats,
     const absl::optional<RtpState>& suspended_rtp_state,
-    TimeInterval* overall_call_lifetime,
-    std::unique_ptr<voe::ChannelSendProxy> channel_proxy)
+    std::unique_ptr<voe::ChannelSendInterface> channel_send)
     : worker_queue_(worker_queue),
       config_(Config(/*send_transport=*/nullptr,
                      /*media_transport=*/nullptr)),
       audio_state_(audio_state),
-      channel_proxy_(std::move(channel_proxy)),
+      channel_send_(std::move(channel_send)),
       event_log_(event_log),
       bitrate_allocator_(bitrate_allocator),
       rtp_transport_(rtp_transport),
@@ -182,22 +141,19 @@
                            kPacketLossRateMinNumAckedPackets,
                            kRecoverablePacketLossRateMinNumAckedPairs),
       rtp_rtcp_module_(nullptr),
-      suspended_rtp_state_(suspended_rtp_state),
-      overall_call_lifetime_(overall_call_lifetime) {
+      suspended_rtp_state_(suspended_rtp_state) {
   RTC_LOG(LS_INFO) << "AudioSendStream: " << config.rtp.ssrc;
   RTC_DCHECK(worker_queue_);
   RTC_DCHECK(audio_state_);
-  RTC_DCHECK(channel_proxy_);
+  RTC_DCHECK(channel_send_);
   RTC_DCHECK(bitrate_allocator_);
   // TODO(nisse): Eventually, we should have only media_transport. But for the
   // time being, we can have either. When media transport is injected, there
   // should be no rtp_transport, and below check should be strengthened to XOR
   // (either rtp_transport or media_transport but not both).
   RTC_DCHECK(rtp_transport || config.media_transport);
-  RTC_DCHECK(overall_call_lifetime_);
 
-  channel_proxy_->SetRTCPStatus(true);
-  rtp_rtcp_module_ = channel_proxy_->GetRtpRtcp();
+  rtp_rtcp_module_ = channel_send_->GetRtpRtcp();
   RTC_DCHECK(rtp_rtcp_module_);
 
   ConfigureStream(this, config, true);
@@ -216,13 +172,8 @@
   RTC_DCHECK(!sending_);
   if (rtp_transport_) {
     rtp_transport_->DeRegisterPacketFeedbackObserver(this);
-    channel_proxy_->RegisterTransport(nullptr);
-    channel_proxy_->ResetSenderCongestionControlObjects();
+    channel_send_->ResetSenderCongestionControlObjects();
   }
-  // Lifetime can only be updated after deregistering
-  // |timed_send_transport_adapter_| in the underlying channel object to avoid
-  // data races in |active_lifetime_|.
-  overall_call_lifetime_->Extend(active_lifetime_);
 }
 
 const webrtc::AudioSendStream::Config& AudioSendStream::GetConfig() const {
@@ -260,56 +211,39 @@
   UpdateEventLogStreamConfig(stream->event_log_, new_config,
                              first_time ? nullptr : &stream->config_);
 
-  const auto& channel_proxy = stream->channel_proxy_;
+  const auto& channel_send = stream->channel_send_;
   const auto& old_config = stream->config_;
 
+  // Configuration parameters which cannot be changed.
+  RTC_DCHECK(first_time ||
+             old_config.send_transport == new_config.send_transport);
+
   if (first_time || old_config.rtp.ssrc != new_config.rtp.ssrc) {
-    channel_proxy->SetLocalSSRC(new_config.rtp.ssrc);
+    channel_send->SetLocalSSRC(new_config.rtp.ssrc);
     if (stream->suspended_rtp_state_) {
       stream->rtp_rtcp_module_->SetRtpState(*stream->suspended_rtp_state_);
     }
   }
   if (first_time || old_config.rtp.c_name != new_config.rtp.c_name) {
-    channel_proxy->SetRTCP_CNAME(new_config.rtp.c_name);
-  }
-  // TODO(solenberg): Config NACK history window (which is a packet count),
-  // using the actual packet size for the configured codec.
-  if (first_time || old_config.rtp.nack.rtp_history_ms !=
-                        new_config.rtp.nack.rtp_history_ms) {
-    channel_proxy->SetNACKStatus(new_config.rtp.nack.rtp_history_ms != 0,
-                                 new_config.rtp.nack.rtp_history_ms / 20);
-  }
-
-  if (first_time || new_config.send_transport != old_config.send_transport) {
-    if (old_config.send_transport) {
-      channel_proxy->RegisterTransport(nullptr);
-    }
-    if (new_config.send_transport) {
-      stream->timed_send_transport_adapter_.reset(new TimedTransport(
-          new_config.send_transport, &stream->active_lifetime_));
-    } else {
-      stream->timed_send_transport_adapter_.reset(nullptr);
-    }
-    channel_proxy->RegisterTransport(
-        stream->timed_send_transport_adapter_.get());
+    channel_send->SetRTCP_CNAME(new_config.rtp.c_name);
   }
 
   // Enable the frame encryptor if a new frame encryptor has been provided.
   if (first_time || new_config.frame_encryptor != old_config.frame_encryptor) {
-    channel_proxy->SetFrameEncryptor(new_config.frame_encryptor);
+    channel_send->SetFrameEncryptor(new_config.frame_encryptor);
   }
 
   if (first_time ||
       new_config.rtp.extmap_allow_mixed != old_config.rtp.extmap_allow_mixed) {
-    channel_proxy->SetExtmapAllowMixed(new_config.rtp.extmap_allow_mixed);
+    channel_send->SetExtmapAllowMixed(new_config.rtp.extmap_allow_mixed);
   }
 
   const ExtensionIds old_ids = FindExtensionIds(old_config.rtp.extensions);
   const ExtensionIds new_ids = FindExtensionIds(new_config.rtp.extensions);
   // Audio level indication
   if (first_time || new_ids.audio_level != old_ids.audio_level) {
-    channel_proxy->SetSendAudioLevelIndicationStatus(new_ids.audio_level != 0,
-                                                     new_ids.audio_level);
+    channel_send->SetSendAudioLevelIndicationStatus(new_ids.audio_level != 0,
+                                                    new_ids.audio_level);
   }
   bool transport_seq_num_id_changed =
       new_ids.transport_sequence_number != old_ids.transport_sequence_number;
@@ -317,7 +251,7 @@
       (transport_seq_num_id_changed &&
        !webrtc::field_trial::IsEnabled("WebRTC-Audio-ForceNoTWCC"))) {
     if (!first_time) {
-      channel_proxy->ResetSenderCongestionControlObjects();
+      channel_send->ResetSenderCongestionControlObjects();
     }
 
     RtcpBandwidthObserver* bandwidth_observer = nullptr;
@@ -325,7 +259,7 @@
         new_ids.transport_sequence_number != 0 &&
         !webrtc::field_trial::IsEnabled("WebRTC-Audio-ForceNoTWCC");
     if (has_transport_sequence_number) {
-      channel_proxy->EnableSendTransportSequenceNumber(
+      channel_send->EnableSendTransportSequenceNumber(
           new_ids.transport_sequence_number);
       // Probing in application limited region is only used in combination with
      // send side congestion control, which depends on feedback packets which
@@ -336,7 +270,7 @@
       }
     }
     if (stream->rtp_transport_) {
-      channel_proxy->RegisterSenderCongestionControlObjects(
+      channel_send->RegisterSenderCongestionControlObjects(
           stream->rtp_transport_, bandwidth_observer);
     }
   }
@@ -344,7 +278,7 @@
   if ((first_time || new_ids.mid != old_ids.mid ||
        new_config.rtp.mid != old_config.rtp.mid) &&
       new_ids.mid != 0 && !new_config.rtp.mid.empty()) {
-    channel_proxy->SetMid(new_config.rtp.mid, new_ids.mid);
+    channel_send->SetMid(new_config.rtp.mid, new_ids.mid);
   }
 
   if (!ReconfigureSendCodec(stream, new_config)) {
@@ -380,7 +314,7 @@
   } else {
     rtp_rtcp_module_->SetAsPartOfAllocation(false);
   }
-  channel_proxy_->StartSend();
+  channel_send_->StartSend();
   sending_ = true;
   audio_state()->AddSendingStream(this, encoder_sample_rate_hz_,
                                   encoder_num_channels_);
@@ -393,14 +327,14 @@
   }
 
   RemoveBitrateObserver();
-  channel_proxy_->StopSend();
+  channel_send_->StopSend();
   sending_ = false;
   audio_state()->RemoveSendingStream(this);
 }
 
 void AudioSendStream::SendAudioData(std::unique_ptr<AudioFrame> audio_frame) {
   RTC_CHECK_RUNS_SERIALIZED(&audio_capture_race_checker_);
-  channel_proxy_->ProcessAndEncodeAudio(std::move(audio_frame));
+  channel_send_->ProcessAndEncodeAudio(std::move(audio_frame));
 }
 
 bool AudioSendStream::SendTelephoneEvent(int payload_type,
@@ -408,14 +342,14 @@
                                          int event,
                                          int duration_ms) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_proxy_->SetSendTelephoneEventPayloadType(payload_type,
-                                                          payload_frequency) &&
-         channel_proxy_->SendTelephoneEventOutband(event, duration_ms);
+  return channel_send_->SetSendTelephoneEventPayloadType(payload_type,
+                                                         payload_frequency) &&
+         channel_send_->SendTelephoneEventOutband(event, duration_ms);
 }
 
 void AudioSendStream::SetMuted(bool muted) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_proxy_->SetInputMute(muted);
+  channel_send_->SetInputMute(muted);
 }
 
 webrtc::AudioSendStream::Stats AudioSendStream::GetStats() const {
@@ -427,9 +361,9 @@
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   webrtc::AudioSendStream::Stats stats;
   stats.local_ssrc = config_.rtp.ssrc;
-  stats.target_bitrate_bps = channel_proxy_->GetBitrate();
+  stats.target_bitrate_bps = channel_send_->GetBitrate();
 
-  webrtc::CallSendStatistics call_stats = channel_proxy_->GetRTCPStatistics();
+  webrtc::CallSendStatistics call_stats = channel_send_->GetRTCPStatistics();
   stats.bytes_sent = call_stats.bytesSent;
   stats.packets_sent = call_stats.packetsSent;
   // RTT isn't known until a RTCP report is received. Until then, VoiceEngine
@@ -443,7 +377,7 @@
     stats.codec_payload_type = spec.payload_type;
 
     // Get data from the last remote RTCP report.
-    for (const auto& block : channel_proxy_->GetRemoteRTCPReportBlocks()) {
+    for (const auto& block : channel_send_->GetRemoteRTCPReportBlocks()) {
       // Lookup report for send ssrc only.
       if (block.source_SSRC == stats.local_ssrc) {
         stats.packets_lost = block.cumulative_num_packets_lost;
@@ -465,7 +399,7 @@
   stats.total_input_duration = input_stats.total_duration;
 
   stats.typing_noise_detected = audio_state()->typing_noise_detected();
-  stats.ana_statistics = channel_proxy_->GetANAStatistics();
+  stats.ana_statistics = channel_send_->GetANAStatistics();
   RTC_DCHECK(audio_state_->audio_processing());
   stats.apm_statistics =
       audio_state_->audio_processing()->GetStatistics(has_remote_tracks);
@@ -482,25 +416,24 @@
   // calls on the worker thread. We should move towards always using a network
   // thread. Then this check can be enabled.
   // RTC_DCHECK(!worker_thread_checker_.CalledOnValidThread());
-  return channel_proxy_->ReceivedRTCPPacket(packet, length);
+  return channel_send_->ReceivedRTCPPacket(packet, length);
 }
 
 uint32_t AudioSendStream::OnBitrateUpdated(BitrateAllocationUpdate update) {
   // A send stream may be allocated a bitrate of zero if the allocator decides
   // to disable it. For now we ignore this decision and keep sending on min
   // bitrate.
-  if (update.bitrate_bps == 0) {
-    update.bitrate_bps = config_.min_bitrate_bps;
+  if (update.target_bitrate.IsZero()) {
+    update.target_bitrate = DataRate::bps(config_.min_bitrate_bps);
   }
-  RTC_DCHECK_GE(update.bitrate_bps,
-                static_cast<uint32_t>(config_.min_bitrate_bps));
+  RTC_DCHECK_GE(update.target_bitrate.bps<int>(), config_.min_bitrate_bps);
   // The bitrate allocator might allocate a bitrate higher than the configured
   // max if there is room, to allow for, for example, extra FEC. Ignore that for now.
-  const uint32_t max_bitrate_bps = config_.max_bitrate_bps;
-  if (update.bitrate_bps > max_bitrate_bps)
-    update.bitrate_bps = max_bitrate_bps;
+  const DataRate max_bitrate = DataRate::bps(config_.max_bitrate_bps);
+  if (update.target_bitrate > max_bitrate)
+    update.target_bitrate = max_bitrate;
 
-  channel_proxy_->SetBitrate(update.bitrate_bps, update.bwe_period_ms);
+  channel_send_->OnBitrateAllocation(update);
 
   // The amount of audio protection is not exposed by the encoder, hence
   // always returning 0.
@@ -534,25 +467,24 @@
   // the previously sent value is no longer relevant. This will be taken care
   // of with some refactoring which is now being done.
   if (plr) {
-    channel_proxy_->OnTwccBasedUplinkPacketLossRate(*plr);
+    channel_send_->OnTwccBasedUplinkPacketLossRate(*plr);
   }
   if (rplr) {
-    channel_proxy_->OnRecoverableUplinkPacketLossRate(*rplr);
+    channel_send_->OnRecoverableUplinkPacketLossRate(*rplr);
   }
 }
 
 void AudioSendStream::SetTransportOverhead(int transport_overhead_per_packet) {
   RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_proxy_->SetTransportOverhead(transport_overhead_per_packet);
+  channel_send_->SetTransportOverhead(transport_overhead_per_packet);
 }
 
 RtpState AudioSendStream::GetRtpState() const {
   return rtp_rtcp_module_->GetRtpState();
 }
 
-const voe::ChannelSendProxy& AudioSendStream::GetChannelProxy() const {
-  RTC_DCHECK(channel_proxy_.get());
-  return *channel_proxy_.get();
+const voe::ChannelSendInterface* AudioSendStream::GetChannel() const {
+  return channel_send_.get();
 }
 
 internal::AudioState* AudioSendStream::audio_state() {
@@ -637,8 +569,8 @@
 
   stream->StoreEncoderProperties(encoder->SampleRateHz(),
                                  encoder->NumChannels());
-  stream->channel_proxy_->SetEncoder(new_config.send_codec_spec->payload_type,
-                                     std::move(encoder));
+  stream->channel_send_->SetEncoder(new_config.send_codec_spec->payload_type,
+                                    std::move(encoder));
   return true;
 }
 
@@ -684,7 +616,7 @@
   if (!do_not_update_target_bitrate && new_target_bitrate_bps &&
       new_target_bitrate_bps !=
           old_config.send_codec_spec->target_bitrate_bps) {
-    CallEncoder(stream->channel_proxy_, [&](AudioEncoder* encoder) {
+    CallEncoder(stream->channel_send_, [&](AudioEncoder* encoder) {
       encoder->OnReceivedTargetAudioBitrate(*new_target_bitrate_bps);
     });
   }
@@ -702,7 +634,7 @@
     return;
   }
   if (new_config.audio_network_adaptor_config) {
-    CallEncoder(stream->channel_proxy_, [&](AudioEncoder* encoder) {
+    CallEncoder(stream->channel_send_, [&](AudioEncoder* encoder) {
       if (encoder->EnableAudioNetworkAdaptor(
               *new_config.audio_network_adaptor_config, stream->event_log_)) {
         RTC_DLOG(LS_INFO) << "Audio network adaptor enabled on SSRC "
@@ -712,7 +644,7 @@
       }
     });
   } else {
-    CallEncoder(stream->channel_proxy_, [&](AudioEncoder* encoder) {
+    CallEncoder(stream->channel_send_, [&](AudioEncoder* encoder) {
       encoder->DisableAudioNetworkAdaptor();
     });
     RTC_DLOG(LS_INFO) << "Audio network adaptor disabled on SSRC "
@@ -736,7 +668,7 @@
   }
 
   // Wrap or unwrap the encoder in an AudioEncoderCNG.
-  stream->channel_proxy_->ModifyEncoder(
+  stream->channel_send_->ModifyEncoder(
       [&](std::unique_ptr<AudioEncoder>* encoder_ptr) {
         std::unique_ptr<AudioEncoder> old_encoder(std::move(*encoder_ptr));
         auto sub_encoders = old_encoder->ReclaimContainedEncoders();
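
The bitrate path above now receives a BitrateAllocationUpdate carrying unit types (DataRate, TimeDelta) instead of raw bitrate_bps/bwe_period_ms fields. A minimal sketch of the clamping that OnBitrateUpdated performs, using a simplified stand-in for the rate type rather than the real api/units classes:

    #include <algorithm>
    #include <cstdint>

    // Simplified stand-in for webrtc::DataRate (the real type is in api/units).
    struct DataRateBps {
      int64_t bps = 0;
      bool IsZero() const { return bps == 0; }
    };

    struct Update {
      DataRateBps target_bitrate;
    };

    // Mirrors the logic above: an allocation of zero falls back to the configured
    // minimum, and anything above the configured maximum is capped before it is
    // forwarded to the channel.
    DataRateBps ClampTargetBitrate(Update update,
                                   int min_bitrate_bps,
                                   int max_bitrate_bps) {
      if (update.target_bitrate.IsZero())
        update.target_bitrate.bps = min_bitrate_bps;
      update.target_bitrate.bps =
          std::min<int64_t>(update.target_bitrate.bps, max_bitrate_bps);
      return update.target_bitrate;
    }
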
diff --git a/audio/audio_send_stream.h b/audio/audio_send_stream.h
index c86a9dc..bf94901 100644
--- a/audio/audio_send_stream.h
+++ b/audio/audio_send_stream.h
@@ -14,7 +14,7 @@
 #include <memory>
 #include <vector>
 
-#include "audio/time_interval.h"
+#include "audio/channel_send.h"
 #include "audio/transport_feedback_packet_loss_tracker.h"
 #include "call/audio_send_stream.h"
 #include "call/audio_state.h"
@@ -30,10 +30,6 @@
 class RtcpRttStats;
 class RtpTransportControllerSendInterface;
 
-namespace voe {
-class ChannelSendProxy;
-}  // namespace voe
-
 namespace internal {
 class AudioState;
 
@@ -49,9 +45,8 @@
                   BitrateAllocatorInterface* bitrate_allocator,
                   RtcEventLog* event_log,
                   RtcpRttStats* rtcp_rtt_stats,
-                  const absl::optional<RtpState>& suspended_rtp_state,
-                  TimeInterval* overall_call_lifetime);
-  // For unit tests, which need to supply a mock channel proxy.
+                  const absl::optional<RtpState>& suspended_rtp_state);
+  // For unit tests, which need to supply a mock ChannelSend.
   AudioSendStream(const webrtc::AudioSendStream::Config& config,
                   const rtc::scoped_refptr<webrtc::AudioState>& audio_state,
                   rtc::TaskQueue* worker_queue,
@@ -60,8 +55,7 @@
                   RtcEventLog* event_log,
                   RtcpRttStats* rtcp_rtt_stats,
                   const absl::optional<RtpState>& suspended_rtp_state,
-                  TimeInterval* overall_call_lifetime,
-                  std::unique_ptr<voe::ChannelSendProxy> channel_proxy);
+                  std::unique_ptr<voe::ChannelSendInterface> channel_send);
   ~AudioSendStream() override;
 
   // webrtc::AudioSendStream implementation.
@@ -93,7 +87,7 @@
   void SetTransportOverhead(int transport_overhead_per_packet);
 
   RtpState GetRtpState() const;
-  const voe::ChannelSendProxy& GetChannelProxy() const;
+  const voe::ChannelSendInterface* GetChannel() const;
 
  private:
   class TimedTransport;
@@ -130,7 +124,7 @@
   rtc::TaskQueue* worker_queue_;
   webrtc::AudioSendStream::Config config_;
   rtc::scoped_refptr<webrtc::AudioState> audio_state_;
-  std::unique_ptr<voe::ChannelSendProxy> channel_proxy_;
+  const std::unique_ptr<voe::ChannelSendInterface> channel_send_;
   RtcEventLog* const event_log_;
 
   int encoder_sample_rate_hz_ = 0;
@@ -147,10 +141,6 @@
   RtpRtcp* rtp_rtcp_module_;
   absl::optional<RtpState> const suspended_rtp_state_;
 
-  std::unique_ptr<TimedTransport> timed_send_transport_adapter_;
-  TimeInterval active_lifetime_;
-  TimeInterval* overall_call_lifetime_ = nullptr;
-
   // RFC 5285: Each distinct extension MUST have a unique ID. The value 0 is
   // reserved for padding and MUST NOT be used as a local identifier.
   // So it should be safe to use 0 here to indicate "not configured".
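
As the header above shows, the stream now owns its channel through a const std::unique_ptr to an interface, with a test-only constructor that injects it. A minimal sketch of that injection pattern with illustrative names (Stream and ChannelInterface here are not the real classes):

    #include <memory>
    #include <utility>

    // Illustrative channel interface; unit tests inject a gmock implementation.
    class ChannelInterface {
     public:
      virtual ~ChannelInterface() = default;
      virtual void StartSend() = 0;
    };

    class Stream {
     public:
      // Ownership of the (possibly mocked) channel transfers in, and the pointer
      // itself stays fixed for the lifetime of the stream.
      explicit Stream(std::unique_ptr<ChannelInterface> channel)
          : channel_(std::move(channel)) {}

      void Start() { channel_->StartSend(); }

     private:
      const std::unique_ptr<ChannelInterface> channel_;
    };
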
diff --git a/audio/audio_send_stream_unittest.cc b/audio/audio_send_stream_unittest.cc
index 6a92329..e400ada 100644
--- a/audio/audio_send_stream_unittest.cc
+++ b/audio/audio_send_stream_unittest.cc
@@ -14,7 +14,6 @@
 
 #include "absl/memory/memory.h"
 #include "api/test/mock_frame_encryptor.h"
-#include "api/units/time_delta.h"
 #include "audio/audio_send_stream.h"
 #include "audio/audio_state.h"
 #include "audio/conversion.h"
@@ -28,12 +27,10 @@
 #include "modules/rtp_rtcp/mocks/mock_rtcp_bandwidth_observer.h"
 #include "modules/rtp_rtcp/mocks/mock_rtcp_rtt_stats.h"
 #include "modules/rtp_rtcp/mocks/mock_rtp_rtcp.h"
-#include "rtc_base/fakeclock.h"
 #include "rtc_base/task_queue.h"
 #include "test/gtest.h"
 #include "test/mock_audio_encoder.h"
 #include "test/mock_audio_encoder_factory.h"
-#include "test/mock_transport.h"
 
 namespace webrtc {
 namespace test {
@@ -42,6 +39,7 @@
 using testing::_;
 using testing::Eq;
 using testing::Ne;
+using testing::Field;
 using testing::Invoke;
 using testing::Return;
 using testing::StrEq;
@@ -143,7 +141,7 @@
         new rtc::RefCountedObject<MockAudioDeviceModule>();
     audio_state_ = AudioState::Create(config);
 
-    SetupDefaultChannelProxy(audio_bwe_enabled);
+    SetupDefaultChannelSend(audio_bwe_enabled);
     SetupMockForSetupSendCodec(expect_set_encoder_call);
 
     // Use ISAC as default codec so as to prevent unnecessary |channel_proxy_|
@@ -151,7 +149,6 @@
     stream_config_.send_codec_spec =
         AudioSendStream::Config::SendCodecSpec(kIsacPayloadType, kIsacFormat);
     stream_config_.rtp.ssrc = kSsrc;
-    stream_config_.rtp.nack.rtp_history_ms = 200;
     stream_config_.rtp.c_name = kCName;
     stream_config_.rtp.extensions.push_back(
         RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId));
@@ -168,8 +165,7 @@
         new internal::AudioSendStream(
             stream_config_, audio_state_, &worker_queue_, &rtp_transport_,
             &bitrate_allocator_, &event_log_, &rtcp_rtt_stats_, absl::nullopt,
-            &active_lifetime_,
-            std::unique_ptr<voe::ChannelSendProxy>(channel_proxy_)));
+            std::unique_ptr<voe::ChannelSendInterface>(channel_send_)));
   }
 
   AudioSendStream::Config& config() { return stream_config_; }
@@ -177,9 +173,8 @@
     return *static_cast<MockAudioEncoderFactory*>(
         stream_config_.encoder_factory.get());
   }
-  MockChannelSendProxy* channel_proxy() { return channel_proxy_; }
+  MockChannelSend* channel_send() { return channel_send_; }
   RtpTransportControllerSendInterface* transport() { return &rtp_transport_; }
-  TimeInterval* active_lifetime() { return &active_lifetime_; }
 
   static void AddBweToConfig(AudioSendStream::Config* config) {
     config->rtp.extensions.push_back(RtpExtension(
@@ -187,48 +182,40 @@
     config->send_codec_spec->transport_cc_enabled = true;
   }
 
-  void SetupDefaultChannelProxy(bool audio_bwe_enabled) {
-    EXPECT_TRUE(channel_proxy_ == nullptr);
-    channel_proxy_ = new testing::StrictMock<MockChannelSendProxy>();
-    EXPECT_CALL(*channel_proxy_, GetRtpRtcp()).WillRepeatedly(Invoke([this]() {
+  void SetupDefaultChannelSend(bool audio_bwe_enabled) {
+    EXPECT_TRUE(channel_send_ == nullptr);
+    channel_send_ = new testing::StrictMock<MockChannelSend>();
+    EXPECT_CALL(*channel_send_, GetRtpRtcp()).WillRepeatedly(Invoke([this]() {
       return &this->rtp_rtcp_;
     }));
-    EXPECT_CALL(*channel_proxy_, SetRTCPStatus(true)).Times(1);
-    EXPECT_CALL(*channel_proxy_, SetLocalSSRC(kSsrc)).Times(1);
-    EXPECT_CALL(*channel_proxy_, SetRTCP_CNAME(StrEq(kCName))).Times(1);
-    EXPECT_CALL(*channel_proxy_, SetNACKStatus(true, 10)).Times(1);
-    EXPECT_CALL(*channel_proxy_, SetFrameEncryptor(_)).Times(1);
-    EXPECT_CALL(*channel_proxy_, SetExtmapAllowMixed(false)).Times(1);
-    EXPECT_CALL(*channel_proxy_,
+    EXPECT_CALL(*channel_send_, SetLocalSSRC(kSsrc)).Times(1);
+    EXPECT_CALL(*channel_send_, SetRTCP_CNAME(StrEq(kCName))).Times(1);
+    EXPECT_CALL(*channel_send_, SetFrameEncryptor(_)).Times(1);
+    EXPECT_CALL(*channel_send_, SetExtmapAllowMixed(false)).Times(1);
+    EXPECT_CALL(*channel_send_,
                 SetSendAudioLevelIndicationStatus(true, kAudioLevelId))
         .Times(1);
     EXPECT_CALL(rtp_transport_, GetBandwidthObserver())
         .WillRepeatedly(Return(&bandwidth_observer_));
     if (audio_bwe_enabled) {
-      EXPECT_CALL(*channel_proxy_,
+      EXPECT_CALL(*channel_send_,
                   EnableSendTransportSequenceNumber(kTransportSequenceNumberId))
           .Times(1);
-      EXPECT_CALL(*channel_proxy_,
+      EXPECT_CALL(*channel_send_,
                   RegisterSenderCongestionControlObjects(
                       &rtp_transport_, Eq(&bandwidth_observer_)))
           .Times(1);
     } else {
-      EXPECT_CALL(*channel_proxy_, RegisterSenderCongestionControlObjects(
-                                       &rtp_transport_, Eq(nullptr)))
+      EXPECT_CALL(*channel_send_, RegisterSenderCongestionControlObjects(
+                                      &rtp_transport_, Eq(nullptr)))
           .Times(1);
     }
-    EXPECT_CALL(*channel_proxy_, ResetSenderCongestionControlObjects())
-        .Times(1);
-    {
-      ::testing::InSequence unregister_on_destruction;
-      EXPECT_CALL(*channel_proxy_, RegisterTransport(_)).Times(1);
-      EXPECT_CALL(*channel_proxy_, RegisterTransport(nullptr)).Times(1);
-    }
+    EXPECT_CALL(*channel_send_, ResetSenderCongestionControlObjects()).Times(1);
   }
 
   void SetupMockForSetupSendCodec(bool expect_set_encoder_call) {
     if (expect_set_encoder_call) {
-      EXPECT_CALL(*channel_proxy_, SetEncoderForMock(_, _))
+      EXPECT_CALL(*channel_send_, SetEncoderForMock(_, _))
           .WillOnce(Invoke(
               [this](int payload_type, std::unique_ptr<AudioEncoder>* encoder) {
                 this->audio_encoder_ = std::move(*encoder);
@@ -239,7 +226,7 @@
 
   void SetupMockForModifyEncoder() {
     // Let ModifyEncoder to invoke mock audio encoder.
-    EXPECT_CALL(*channel_proxy_, ModifyEncoder(_))
+    EXPECT_CALL(*channel_send_, ModifyEncoder(_))
         .WillRepeatedly(Invoke(
             [this](rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)>
                        modifier) {
@@ -249,13 +236,13 @@
   }
 
   void SetupMockForSendTelephoneEvent() {
-    EXPECT_TRUE(channel_proxy_);
-    EXPECT_CALL(*channel_proxy_, SetSendTelephoneEventPayloadType(
-                                     kTelephoneEventPayloadType,
-                                     kTelephoneEventPayloadFrequency))
+    EXPECT_TRUE(channel_send_);
+    EXPECT_CALL(*channel_send_, SetSendTelephoneEventPayloadType(
+                                    kTelephoneEventPayloadType,
+                                    kTelephoneEventPayloadFrequency))
         .WillOnce(Return(true));
     EXPECT_CALL(
-        *channel_proxy_,
+        *channel_send_,
         SendTelephoneEventOutband(kTelephoneEventCode, kTelephoneEventDuration))
         .WillOnce(Return(true));
   }
@@ -273,14 +260,14 @@
     block.fraction_lost = 0;
     report_blocks.push_back(block);  // Duplicate SSRC, bad fraction_lost.
 
-    EXPECT_TRUE(channel_proxy_);
-    EXPECT_CALL(*channel_proxy_, GetRTCPStatistics())
+    EXPECT_TRUE(channel_send_);
+    EXPECT_CALL(*channel_send_, GetRTCPStatistics())
         .WillRepeatedly(Return(kCallStats));
-    EXPECT_CALL(*channel_proxy_, GetRemoteRTCPReportBlocks())
+    EXPECT_CALL(*channel_send_, GetRemoteRTCPReportBlocks())
         .WillRepeatedly(Return(report_blocks));
-    EXPECT_CALL(*channel_proxy_, GetANAStatistics())
+    EXPECT_CALL(*channel_send_, GetANAStatistics())
         .WillRepeatedly(Return(ANAStats()));
-    EXPECT_CALL(*channel_proxy_, GetBitrate()).WillRepeatedly(Return(0));
+    EXPECT_CALL(*channel_send_, GetBitrate()).WillRepeatedly(Return(0));
 
     audio_processing_stats_.echo_return_loss = kEchoReturnLoss;
     audio_processing_stats_.echo_return_loss_enhancement =
@@ -300,10 +287,9 @@
  private:
   rtc::scoped_refptr<AudioState> audio_state_;
   AudioSendStream::Config stream_config_;
-  testing::StrictMock<MockChannelSendProxy>* channel_proxy_ = nullptr;
+  testing::StrictMock<MockChannelSend>* channel_send_ = nullptr;
   rtc::scoped_refptr<MockAudioProcessing> audio_processing_;
   AudioProcessingStats audio_processing_stats_;
-  TimeInterval active_lifetime_;
   testing::StrictMock<MockRtcpBandwidthObserver> bandwidth_observer_;
   testing::NiceMock<MockRtcEventLog> event_log_;
   testing::NiceMock<MockRtpTransportControllerSend> rtp_transport_;
@@ -334,11 +320,12 @@
   config.rtp.extmap_allow_mixed = true;
   config.rtp.extensions.push_back(
       RtpExtension(RtpExtension::kAudioLevelUri, kAudioLevelId));
+  config.rtcp_report_interval_ms = 2500;
   EXPECT_EQ(
       "{rtp: {ssrc: 1234, extmap-allow-mixed: true, extensions: [{uri: "
-      "urn:ietf:params:rtp-hdrext:ssrc-audio-level, id: 2}], nack: "
-      "{rtp_history_ms: 0}, c_name: foo_name}, send_transport: null, "
-      "media_transport: null, "
+      "urn:ietf:params:rtp-hdrext:ssrc-audio-level, id: 2}], "
+      "c_name: foo_name}, rtcp_report_interval_ms: 2500, "
+      "send_transport: null, media_transport: null, "
       "min_bitrate_bps: 12000, max_bitrate_bps: 34000, "
       "send_codec_spec: {nack_enabled: true, transport_cc_enabled: false, "
       "cng_payload_type: 42, payload_type: 103, "
@@ -364,7 +351,7 @@
 TEST(AudioSendStreamTest, SetMuted) {
   ConfigHelper helper(false, true);
   auto send_stream = helper.CreateAudioSendStream();
-  EXPECT_CALL(*helper.channel_proxy(), SetInputMute(true));
+  EXPECT_CALL(*helper.channel_send(), SetInputMute(true));
   send_stream->SetMuted(true);
 }
 
@@ -454,7 +441,7 @@
   helper.config().send_codec_spec->cng_payload_type = 105;
   using ::testing::Invoke;
   std::unique_ptr<AudioEncoder> stolen_encoder;
-  EXPECT_CALL(*helper.channel_proxy(), SetEncoderForMock(_, _))
+  EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
       .WillOnce(
           Invoke([&stolen_encoder](int payload_type,
                                    std::unique_ptr<AudioEncoder>* encoder) {
@@ -474,25 +461,30 @@
 TEST(AudioSendStreamTest, DoesNotPassHigherBitrateThanMaxBitrate) {
   ConfigHelper helper(false, true);
   auto send_stream = helper.CreateAudioSendStream();
-  EXPECT_CALL(*helper.channel_proxy(),
-              SetBitrate(helper.config().max_bitrate_bps, _));
+  EXPECT_CALL(*helper.channel_send(),
+              OnBitrateAllocation(
+                  Field(&BitrateAllocationUpdate::target_bitrate,
+                        Eq(DataRate::bps(helper.config().max_bitrate_bps)))));
   BitrateAllocationUpdate update;
-  update.bitrate_bps = helper.config().max_bitrate_bps + 5000;
-  update.fraction_loss = 0;
-  update.rtt = 50;
-  update.bwe_period_ms = 6000;
+  update.target_bitrate = DataRate::bps(helper.config().max_bitrate_bps + 5000);
+  update.packet_loss_ratio = 0;
+  update.round_trip_time = TimeDelta::ms(50);
+  update.bwe_period = TimeDelta::ms(6000);
   send_stream->OnBitrateUpdated(update);
 }
 
 TEST(AudioSendStreamTest, ProbingIntervalOnBitrateUpdated) {
   ConfigHelper helper(false, true);
   auto send_stream = helper.CreateAudioSendStream();
-  EXPECT_CALL(*helper.channel_proxy(), SetBitrate(_, 5000));
+
+  EXPECT_CALL(*helper.channel_send(),
+              OnBitrateAllocation(Field(&BitrateAllocationUpdate::bwe_period,
+                                        Eq(TimeDelta::ms(5000)))));
   BitrateAllocationUpdate update;
-  update.bitrate_bps = helper.config().max_bitrate_bps + 5000;
-  update.fraction_loss = 0;
-  update.rtt = 50;
-  update.bwe_period_ms = 5000;
+  update.target_bitrate = DataRate::bps(helper.config().max_bitrate_bps + 5000);
+  update.packet_loss_ratio = 0;
+  update.round_trip_time = TimeDelta::ms(50);
+  update.bwe_period = TimeDelta::ms(5000);
   send_stream->OnBitrateUpdated(update);
 }
 
@@ -504,7 +496,7 @@
   // to be correct, it's instead set-up manually here. Otherwise a simple change
   // to ConfigHelper (say to WillRepeatedly) would silently make this test
   // useless.
-  EXPECT_CALL(*helper.channel_proxy(), SetEncoderForMock(_, _))
+  EXPECT_CALL(*helper.channel_send(), SetEncoderForMock(_, _))
       .WillOnce(Return(true));
 
   helper.config().send_codec_spec =
@@ -519,15 +511,15 @@
   auto send_stream = helper.CreateAudioSendStream();
   auto new_config = helper.config();
   ConfigHelper::AddBweToConfig(&new_config);
-  EXPECT_CALL(*helper.channel_proxy(),
+  EXPECT_CALL(*helper.channel_send(),
               EnableSendTransportSequenceNumber(kTransportSequenceNumberId))
       .Times(1);
   {
     ::testing::InSequence seq;
-    EXPECT_CALL(*helper.channel_proxy(), ResetSenderCongestionControlObjects())
+    EXPECT_CALL(*helper.channel_send(), ResetSenderCongestionControlObjects())
         .Times(1);
-    EXPECT_CALL(*helper.channel_proxy(), RegisterSenderCongestionControlObjects(
-                                             helper.transport(), Ne(nullptr)))
+    EXPECT_CALL(*helper.channel_send(), RegisterSenderCongestionControlObjects(
+                                            helper.transport(), Ne(nullptr)))
         .Times(1);
   }
   send_stream->Reconfigure(new_config);
@@ -543,11 +535,11 @@
   rtc::scoped_refptr<FrameEncryptorInterface> mock_frame_encryptor_0(
       new rtc::RefCountedObject<MockFrameEncryptor>());
   new_config.frame_encryptor = mock_frame_encryptor_0;
-  EXPECT_CALL(*helper.channel_proxy(), SetFrameEncryptor(Ne(nullptr))).Times(1);
+  EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr))).Times(1);
   send_stream->Reconfigure(new_config);
 
   // Not updating the frame encryptor shouldn't force it to reconfigure.
-  EXPECT_CALL(*helper.channel_proxy(), SetFrameEncryptor(_)).Times(0);
+  EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(_)).Times(0);
   send_stream->Reconfigure(new_config);
 
   // Updating frame encryptor to a new object should force a call to the proxy.
@@ -555,36 +547,8 @@
       new rtc::RefCountedObject<MockFrameEncryptor>());
   new_config.frame_encryptor = mock_frame_encryptor_1;
   new_config.crypto_options.sframe.require_frame_encryption = true;
-  EXPECT_CALL(*helper.channel_proxy(), SetFrameEncryptor(Ne(nullptr))).Times(1);
+  EXPECT_CALL(*helper.channel_send(), SetFrameEncryptor(Ne(nullptr))).Times(1);
   send_stream->Reconfigure(new_config);
 }
-
-// Checks that AudioSendStream logs the times at which RTP packets are sent
-// through its interface.
-TEST(AudioSendStreamTest, UpdateLifetime) {
-  ConfigHelper helper(false, true);
-
-  MockTransport mock_transport;
-  helper.config().send_transport = &mock_transport;
-
-  Transport* registered_transport;
-  ON_CALL(*helper.channel_proxy(), RegisterTransport(_))
-      .WillByDefault(Invoke([&registered_transport](Transport* transport) {
-        registered_transport = transport;
-      }));
-
-  rtc::ScopedFakeClock fake_clock;
-  constexpr int64_t kTimeBetweenSendRtpCallsMs = 100;
-  {
-    auto send_stream = helper.CreateAudioSendStream();
-    EXPECT_CALL(mock_transport, SendRtp(_, _, _)).Times(2);
-    const PacketOptions options;
-    registered_transport->SendRtp(nullptr, 0, options);
-    fake_clock.AdvanceTime(TimeDelta::ms(kTimeBetweenSendRtpCallsMs));
-    registered_transport->SendRtp(nullptr, 0, options);
-  }
-  EXPECT_TRUE(!helper.active_lifetime()->Empty());
-  EXPECT_EQ(helper.active_lifetime()->Length(), kTimeBetweenSendRtpCallsMs);
-}
 }  // namespace test
 }  // namespace webrtc
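
The rewritten bitrate tests above match a single field of the update struct with testing::Field instead of checking separate scalar arguments. A self-contained gmock example of that matcher; Update and MockSink are hypothetical types, not the WebRTC ones:

    #include "gmock/gmock.h"
    #include "gtest/gtest.h"

    using ::testing::Eq;
    using ::testing::Field;

    struct Update {
      int target_bps = 0;
      int bwe_period_ms = 0;
    };

    class Sink {
     public:
      virtual ~Sink() = default;
      virtual void OnUpdate(Update update) = 0;
    };

    class MockSink : public Sink {
     public:
      MOCK_METHOD(void, OnUpdate, (Update update), (override));
    };

    TEST(FieldMatcherExample, ConstrainsOnlyOneField) {
      MockSink sink;
      // Only target_bps is constrained; bwe_period_ms can be anything.
      EXPECT_CALL(sink, OnUpdate(Field(&Update::target_bps, Eq(32000))));
      Update update;
      update.target_bps = 32000;
      update.bwe_period_ms = 5000;
      sink.OnUpdate(update);
    }
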
diff --git a/audio/channel_receive.cc b/audio/channel_receive.cc
index 704ba79..483147f 100644
--- a/audio/channel_receive.cc
+++ b/audio/channel_receive.cc
@@ -18,14 +18,19 @@
 #include <vector>
 
 #include "absl/memory/memory.h"
+#include "audio/audio_level.h"
 #include "audio/channel_send.h"
 #include "audio/utility/audio_frame_operations.h"
 #include "logging/rtc_event_log/events/rtc_event_audio_playout.h"
 #include "logging/rtc_event_log/rtc_event_log.h"
 #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
 #include "modules/audio_device/include/audio_device.h"
 #include "modules/pacing/packet_router.h"
 #include "modules/rtp_rtcp/include/receive_statistics.h"
+#include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
+#include "modules/rtp_rtcp/include/rtp_rtcp.h"
+#include "modules/rtp_rtcp/source/contributing_sources.h"
 #include "modules/rtp_rtcp/source/rtp_header_extensions.h"
 #include "modules/rtp_rtcp/source/rtp_packet_received.h"
 #include "modules/utility/include/process_thread.h"
@@ -34,6 +39,8 @@
 #include "rtc_base/format_macros.h"
 #include "rtc_base/location.h"
 #include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_minmax.h"
+#include "rtc_base/race_checker.h"
 #include "rtc_base/thread_checker.h"
 #include "rtc_base/timeutils.h"
 #include "system_wrappers/include/metrics.h"
@@ -83,7 +90,187 @@
   return webrtc_header;
 }
 
-}  // namespace
+class ChannelReceive : public ChannelReceiveInterface,
+                       public MediaTransportAudioSinkInterface {
+ public:
+  // Used for receive streams.
+  ChannelReceive(ProcessThread* module_process_thread,
+                 AudioDeviceModule* audio_device_module,
+                 MediaTransportInterface* media_transport,
+                 Transport* rtcp_send_transport,
+                 RtcEventLog* rtc_event_log,
+                 uint32_t remote_ssrc,
+                 size_t jitter_buffer_max_packets,
+                 bool jitter_buffer_fast_playout,
+                 int jitter_buffer_min_delay_ms,
+                 rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
+                 absl::optional<AudioCodecPairId> codec_pair_id,
+                 rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
+                 const webrtc::CryptoOptions& crypto_options);
+  ~ChannelReceive() override;
+
+  void SetSink(AudioSinkInterface* sink) override;
+
+  void SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs) override;
+
+  // API methods
+
+  void StartPlayout() override;
+  void StopPlayout() override;
+
+  // Codecs
+  bool GetRecCodec(CodecInst* codec) const override;
+
+  bool ReceivedRTCPPacket(const uint8_t* data, size_t length) override;
+
+  // RtpPacketSinkInterface.
+  void OnRtpPacket(const RtpPacketReceived& packet) override;
+
+  // Muting, Volume and Level.
+  void SetChannelOutputVolumeScaling(float scaling) override;
+  int GetSpeechOutputLevelFullRange() const override;
+  // See description of "totalAudioEnergy" in the WebRTC stats spec:
+  // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
+  double GetTotalOutputEnergy() const override;
+  double GetTotalOutputDuration() const override;
+
+  // Stats.
+  NetworkStatistics GetNetworkStatistics() const override;
+  AudioDecodingCallStats GetDecodingCallStatistics() const override;
+
+  // Audio+Video Sync.
+  uint32_t GetDelayEstimate() const override;
+  void SetMinimumPlayoutDelay(int delayMs) override;
+  uint32_t GetPlayoutTimestamp() const override;
+
+  // Produces the transport-related timestamps; current_delay_ms is left unset.
+  absl::optional<Syncable::Info> GetSyncInfo() const override;
+
+  // RTP+RTCP
+  void SetLocalSSRC(unsigned int ssrc) override;
+
+  void RegisterReceiverCongestionControlObjects(
+      PacketRouter* packet_router) override;
+  void ResetReceiverCongestionControlObjects() override;
+
+  CallReceiveStatistics GetRTCPStatistics() const override;
+  void SetNACKStatus(bool enable, int maxNumberOfPackets) override;
+
+  AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
+      int sample_rate_hz,
+      AudioFrame* audio_frame) override;
+
+  int PreferredSampleRate() const override;
+
+  // Associate to a send channel.
+  // Used for obtaining RTT for a receive-only channel.
+  void SetAssociatedSendChannel(const ChannelSendInterface* channel) override;
+
+  std::vector<RtpSource> GetSources() const override;
+
+ private:
+  bool ReceivePacket(const uint8_t* packet,
+                     size_t packet_length,
+                     const RTPHeader& header);
+  int ResendPackets(const uint16_t* sequence_numbers, int length);
+  void UpdatePlayoutTimestamp(bool rtcp);
+
+  int GetRtpTimestampRateHz() const;
+  int64_t GetRTT() const;
+
+  // MediaTransportAudioSinkInterface override;
+  void OnData(uint64_t channel_id,
+              MediaTransportEncodedAudioFrame frame) override;
+
+  int32_t OnReceivedPayloadData(const uint8_t* payloadData,
+                                size_t payloadSize,
+                                const WebRtcRTPHeader* rtpHeader);
+
+  bool Playing() const {
+    rtc::CritScope lock(&playing_lock_);
+    return playing_;
+  }
+
+  // Thread checkers document and lock usage of some methods to specific threads
+  // we know about. The goal is to eventually split up voe::ChannelReceive into
+  // parts with single-threaded semantics, and thereby reduce the need for
+  // locks.
+  rtc::ThreadChecker worker_thread_checker_;
+  rtc::ThreadChecker module_process_thread_checker_;
+  // Methods accessed from audio and video threads are checked for sequential-
+  // only access. We don't necessarily own and control these threads, so thread
+  // checkers cannot be used. E.g. Chromium may transfer "ownership" from one
+  // audio thread to another, but access is still sequential.
+  rtc::RaceChecker audio_thread_race_checker_;
+  rtc::RaceChecker video_capture_thread_race_checker_;
+  rtc::CriticalSection _callbackCritSect;
+  rtc::CriticalSection volume_settings_critsect_;
+
+  rtc::CriticalSection playing_lock_;
+  bool playing_ RTC_GUARDED_BY(&playing_lock_) = false;
+
+  RtcEventLog* const event_log_;
+
+  // Indexed by payload type.
+  std::map<uint8_t, int> payload_type_frequencies_;
+
+  std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
+  std::unique_ptr<RtpRtcp> _rtpRtcpModule;
+  const uint32_t remote_ssrc_;
+
+  // Info for GetSources and GetSyncInfo is updated on network or worker thread,
+  // queried on the worker thread.
+  rtc::CriticalSection rtp_sources_lock_;
+  ContributingSources contributing_sources_ RTC_GUARDED_BY(&rtp_sources_lock_);
+  absl::optional<uint32_t> last_received_rtp_timestamp_
+      RTC_GUARDED_BY(&rtp_sources_lock_);
+  absl::optional<int64_t> last_received_rtp_system_time_ms_
+      RTC_GUARDED_BY(&rtp_sources_lock_);
+  absl::optional<uint8_t> last_received_rtp_audio_level_
+      RTC_GUARDED_BY(&rtp_sources_lock_);
+
+  std::unique_ptr<AudioCodingModule> audio_coding_;
+  AudioSinkInterface* audio_sink_ = nullptr;
+  AudioLevel _outputAudioLevel;
+
+  RemoteNtpTimeEstimator ntp_estimator_ RTC_GUARDED_BY(ts_stats_lock_);
+
+  // Timestamp of the audio pulled from NetEq.
+  absl::optional<uint32_t> jitter_buffer_playout_timestamp_;
+
+  rtc::CriticalSection video_sync_lock_;
+  uint32_t playout_timestamp_rtp_ RTC_GUARDED_BY(video_sync_lock_);
+  uint32_t playout_delay_ms_ RTC_GUARDED_BY(video_sync_lock_);
+
+  rtc::CriticalSection ts_stats_lock_;
+
+  std::unique_ptr<rtc::TimestampWrapAroundHandler> rtp_ts_wraparound_handler_;
+  // The rtp timestamp of the first played out audio frame.
+  int64_t capture_start_rtp_time_stamp_;
+  // The capture ntp time (in local timebase) of the first played out audio
+  // frame.
+  int64_t capture_start_ntp_time_ms_ RTC_GUARDED_BY(ts_stats_lock_);
+
+  // uses
+  ProcessThread* _moduleProcessThreadPtr;
+  AudioDeviceModule* _audioDeviceModulePtr;
+  float _outputGain RTC_GUARDED_BY(volume_settings_critsect_);
+
+  // An associated send channel.
+  rtc::CriticalSection assoc_send_channel_lock_;
+  const ChannelSendInterface* associated_send_channel_
+      RTC_GUARDED_BY(assoc_send_channel_lock_);
+
+  PacketRouter* packet_router_ = nullptr;
+
+  rtc::ThreadChecker construction_thread_;
+
+  MediaTransportInterface* const media_transport_;
+
+  // E2EE Audio Frame Decryption
+  rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor_;
+  webrtc::CryptoOptions crypto_options_;
+};
 
 int32_t ChannelReceive::OnReceivedPayloadData(
     const uint8_t* payloadData,
@@ -92,7 +279,7 @@
   // We should not be receiving any RTP packets if media_transport is set.
   RTC_CHECK(!media_transport_);
 
-  if (!channel_state_.Get().playing) {
+  if (!Playing()) {
     // Avoid inserting into NetEQ when we are not playing. Count the
     // packet as discarded.
     return 0;
@@ -123,7 +310,7 @@
                             MediaTransportEncodedAudioFrame frame) {
   RTC_CHECK(media_transport_);
 
-  if (!channel_state_.Get().playing) {
+  if (!Playing()) {
     // Avoid inserting into NetEQ when we are not playing. Count the
     // packet as discarded.
     return;
@@ -142,11 +329,11 @@
 AudioMixer::Source::AudioFrameInfo ChannelReceive::GetAudioFrameWithInfo(
     int sample_rate_hz,
     AudioFrame* audio_frame) {
+  RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
   audio_frame->sample_rate_hz_ = sample_rate_hz;
 
-  unsigned int ssrc;
-  RTC_CHECK_EQ(GetRemoteSSRC(ssrc), 0);
-  event_log_->Log(absl::make_unique<RtcEventAudioPlayout>(ssrc));
+  event_log_->Log(absl::make_unique<RtcEventAudioPlayout>(remote_ssrc_));
+
   // Get 10ms raw PCM data from the ACM (mixer limits output frequency)
   bool muted;
   if (audio_coding_->PlayoutData10Ms(audio_frame->sample_rate_hz_, audio_frame,
@@ -248,6 +435,7 @@
 }
 
 int ChannelReceive::PreferredSampleRate() const {
+  RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
   // Return the bigger of playout and receive frequency in the ACM.
   return std::max(audio_coding_->ReceiveFrequency(),
                   audio_coding_->PlayoutFrequency());
@@ -262,6 +450,7 @@
     uint32_t remote_ssrc,
     size_t jitter_buffer_max_packets,
     bool jitter_buffer_fast_playout,
+    int jitter_buffer_min_delay_ms,
     rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
     absl::optional<AudioCodecPairId> codec_pair_id,
     rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
@@ -284,6 +473,9 @@
       media_transport_(media_transport),
       frame_decryptor_(frame_decryptor),
       crypto_options_(crypto_options) {
+  // TODO(nisse): Use _moduleProcessThreadPtr instead?
+  module_process_thread_checker_.DetachFromThread();
+
   RTC_DCHECK(module_process_thread);
   RTC_DCHECK(audio_device_module);
   AudioCodingModule::Config acm_config;
@@ -291,6 +483,7 @@
   acm_config.neteq_config.codec_pair_id = codec_pair_id;
   acm_config.neteq_config.max_packets_in_buffer = jitter_buffer_max_packets;
   acm_config.neteq_config.enable_fast_accelerate = jitter_buffer_fast_playout;
+  acm_config.neteq_config.min_delay_ms = jitter_buffer_min_delay_ms;
   acm_config.neteq_config.enable_muted_state = true;
   audio_coding_.reset(AudioCodingModule::Create(acm_config));
 
@@ -308,26 +501,9 @@
   _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
   _rtpRtcpModule->SetSendingMediaStatus(false);
   _rtpRtcpModule->SetRemoteSSRC(remote_ssrc_);
-  Init();
-}
 
-ChannelReceive::~ChannelReceive() {
-  Terminate();
-  RTC_DCHECK(!channel_state_.Get().playing);
-}
-
-void ChannelReceive::Init() {
-  channel_state_.Reset();
-
-  // --- Add modules to process thread (for periodic schedulation)
   _moduleProcessThreadPtr->RegisterModule(_rtpRtcpModule.get(), RTC_FROM_HERE);
 
-  // --- ACM initialization
-  int error = audio_coding_->InitializeReceiver();
-  RTC_DCHECK_EQ(0, error);
-
-  // --- RTP/RTCP module initialization
-
   // Ensure that RTCP is enabled by default for the created channel.
   // Note that, the module will keep generating RTCP until it is explicitly
   // disabled by the user.
@@ -341,63 +517,48 @@
   }
 }
 
-void ChannelReceive::Terminate() {
+ChannelReceive::~ChannelReceive() {
   RTC_DCHECK(construction_thread_.CalledOnValidThread());
 
   if (media_transport_) {
     media_transport_->SetReceiveAudioSink(nullptr);
   }
 
-  // Must be called on the same thread as Init().
-  rtp_receive_statistics_->RegisterRtcpStatisticsCallback(NULL);
-
   StopPlayout();
 
-  // The order to safely shutdown modules in a channel is:
-  // 1. De-register callbacks in modules
-  // 2. De-register modules in process thread
-  // 3. Destroy modules
   int error = audio_coding_->RegisterTransportCallback(NULL);
   RTC_DCHECK_EQ(0, error);
 
-  // De-register modules in process thread
   if (_moduleProcessThreadPtr)
     _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get());
-
-  // End of modules shutdown
 }
 
 void ChannelReceive::SetSink(AudioSinkInterface* sink) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   rtc::CritScope cs(&_callbackCritSect);
   audio_sink_ = sink;
 }
 
-int32_t ChannelReceive::StartPlayout() {
-  if (channel_state_.Get().playing) {
-    return 0;
-  }
-
-  channel_state_.SetPlaying(true);
-
-  return 0;
+void ChannelReceive::StartPlayout() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  rtc::CritScope lock(&playing_lock_);
+  playing_ = true;
 }
 
-int32_t ChannelReceive::StopPlayout() {
-  if (!channel_state_.Get().playing) {
-    return 0;
-  }
-
-  channel_state_.SetPlaying(false);
+void ChannelReceive::StopPlayout() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  rtc::CritScope lock(&playing_lock_);
+  playing_ = false;
   _outputAudioLevel.Clear();
-
-  return 0;
 }
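
StartPlayout() and StopPlayout() now just flip playing_ under playing_lock_. The Playing() helper used by the packet-delivery paths earlier in this file is not shown in these hunks, but is presumably nothing more than a locked read, along these lines:

bool ChannelReceive::Playing() const {
  rtc::CritScope lock(&playing_lock_);
  return playing_;
}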
 
-int32_t ChannelReceive::GetRecCodec(CodecInst& codec) {
-  return (audio_coding_->ReceiveCodec(&codec));
+bool ChannelReceive::GetRecCodec(CodecInst* codec) const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  return (audio_coding_->ReceiveCodec(codec) == 0);
 }
 
 std::vector<webrtc::RtpSource> ChannelReceive::GetSources() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   int64_t now_ms = rtc::TimeMillis();
   std::vector<RtpSource> sources;
   {
@@ -415,6 +576,7 @@
 
 void ChannelReceive::SetReceiveCodecs(
     const std::map<int, SdpAudioFormat>& codecs) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   for (const auto& kv : codecs) {
     RTC_DCHECK_GE(kv.second.clockrate_hz, 1000);
     payload_type_frequencies_[kv.first] = kv.second.clockrate_hz;
@@ -422,7 +584,7 @@
   audio_coding_->SetReceiveCodecs(codecs);
 }
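
For reference, a hypothetical caller of SetReceiveCodecs(); the payload types and SdpAudioFormat values below are placeholders and are not taken from this change:

std::map<int, SdpAudioFormat> codecs;
codecs.emplace(111, SdpAudioFormat("opus", 48000, 2));
codecs.emplace(0, SdpAudioFormat("PCMU", 8000, 1));
channel_receive->SetReceiveCodecs(codecs);  // Also fills payload_type_frequencies_.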
 
-// TODO(nisse): Move receive logic up to AudioReceiveStream.
+// May be called on either worker thread or network thread.
 void ChannelReceive::OnRtpPacket(const RtpPacketReceived& packet) {
   int64_t now_ms = rtc::TimeMillis();
   uint8_t audio_level;
@@ -513,7 +675,9 @@
                                &webrtc_rtp_header);
 }
 
-int32_t ChannelReceive::ReceivedRTCPPacket(const uint8_t* data, size_t length) {
+// May be called on either worker thread or network thread.
+// TODO(nisse): Drop always-true return value.
+bool ChannelReceive::ReceivedRTCPPacket(const uint8_t* data, size_t length) {
   // Store playout timestamp for the received RTCP packet
   UpdatePlayoutTimestamp(true);
 
@@ -523,7 +687,7 @@
   int64_t rtt = GetRTT();
   if (rtt == 0) {
     // Waiting for valid RTT.
-    return 0;
+    return true;
   }
 
   int64_t nack_window_ms = rtt;
@@ -539,46 +703,45 @@
   if (0 != _rtpRtcpModule->RemoteNTP(&ntp_secs, &ntp_frac, NULL, NULL,
                                      &rtp_timestamp)) {
     // Waiting for RTCP.
-    return 0;
+    return true;
   }
 
   {
     rtc::CritScope lock(&ts_stats_lock_);
     ntp_estimator_.UpdateRtcpTimestamp(rtt, ntp_secs, ntp_frac, rtp_timestamp);
   }
-  return 0;
+  return true;
 }
 
 int ChannelReceive::GetSpeechOutputLevelFullRange() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   return _outputAudioLevel.LevelFullRange();
 }
 
 double ChannelReceive::GetTotalOutputEnergy() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   return _outputAudioLevel.TotalEnergy();
 }
 
 double ChannelReceive::GetTotalOutputDuration() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   return _outputAudioLevel.TotalDuration();
 }
 
 void ChannelReceive::SetChannelOutputVolumeScaling(float scaling) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   rtc::CritScope cs(&volume_settings_critsect_);
   _outputGain = scaling;
 }
 
-int ChannelReceive::SetLocalSSRC(unsigned int ssrc) {
+void ChannelReceive::SetLocalSSRC(uint32_t ssrc) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   _rtpRtcpModule->SetSSRC(ssrc);
-  return 0;
-}
-
-// TODO(nisse): Pass ssrc in return value instead.
-int ChannelReceive::GetRemoteSSRC(unsigned int& ssrc) {
-  ssrc = remote_ssrc_;
-  return 0;
 }
 
 void ChannelReceive::RegisterReceiverCongestionControlObjects(
     PacketRouter* packet_router) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   RTC_DCHECK(packet_router);
   RTC_DCHECK(!packet_router_);
   constexpr bool remb_candidate = false;
@@ -587,13 +750,16 @@
 }
 
 void ChannelReceive::ResetReceiverCongestionControlObjects() {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   RTC_DCHECK(packet_router_);
   packet_router_->RemoveReceiveRtpModule(_rtpRtcpModule.get());
   packet_router_ = nullptr;
 }
 
-int ChannelReceive::GetRTPStatistics(CallReceiveStatistics& stats) {
+CallReceiveStatistics ChannelReceive::GetRTCPStatistics() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   // --- RtcpStatistics
+  CallReceiveStatistics stats;
 
   // The jitter statistics is updated for each received RTP packet and is
   // based on received packets.
@@ -630,14 +796,15 @@
     rtc::CritScope lock(&ts_stats_lock_);
     stats.capture_start_ntp_time_ms_ = capture_start_ntp_time_ms_;
   }
-  return 0;
+  return stats;
 }
 
-void ChannelReceive::SetNACKStatus(bool enable, int maxNumberOfPackets) {
+void ChannelReceive::SetNACKStatus(bool enable, int max_packets) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   // None of these functions can fail.
-  rtp_receive_statistics_->SetMaxReorderingThreshold(maxNumberOfPackets);
+  rtp_receive_statistics_->SetMaxReorderingThreshold(max_packets);
   if (enable)
-    audio_coding_->EnableNack(maxNumberOfPackets);
+    audio_coding_->EnableNack(max_packets);
   else
     audio_coding_->DisableNack();
 }
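
SetNACKStatus() both widens the reordering threshold and toggles NACK in the ACM. A hypothetical caller (the 250-packet value is a placeholder):

channel_receive->SetNACKStatus(/*enable=*/true, /*max_packets=*/250);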
@@ -648,54 +815,61 @@
   return _rtpRtcpModule->SendNACK(sequence_numbers, length);
 }
 
-void ChannelReceive::SetAssociatedSendChannel(ChannelSend* channel) {
+void ChannelReceive::SetAssociatedSendChannel(
+    const ChannelSendInterface* channel) {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
   rtc::CritScope lock(&assoc_send_channel_lock_);
   associated_send_channel_ = channel;
 }
 
-int ChannelReceive::GetNetworkStatistics(NetworkStatistics& stats) {
-  return audio_coding_->GetNetworkStatistics(&stats);
+NetworkStatistics ChannelReceive::GetNetworkStatistics() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  NetworkStatistics stats;
+  int error = audio_coding_->GetNetworkStatistics(&stats);
+  RTC_DCHECK_EQ(0, error);
+  return stats;
 }
 
-void ChannelReceive::GetDecodingCallStatistics(
-    AudioDecodingCallStats* stats) const {
-  audio_coding_->GetDecodingCallStatistics(stats);
+AudioDecodingCallStats ChannelReceive::GetDecodingCallStatistics() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
+  AudioDecodingCallStats stats;
+  audio_coding_->GetDecodingCallStatistics(&stats);
+  return stats;
 }
 
 uint32_t ChannelReceive::GetDelayEstimate() const {
+  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread() ||
+             module_process_thread_checker_.CalledOnValidThread());
   rtc::CritScope lock(&video_sync_lock_);
   return audio_coding_->FilteredCurrentDelayMs() + playout_delay_ms_;
 }
 
-int ChannelReceive::SetMinimumPlayoutDelay(int delayMs) {
-  if ((delayMs < kVoiceEngineMinMinPlayoutDelayMs) ||
-      (delayMs > kVoiceEngineMaxMinPlayoutDelayMs)) {
+void ChannelReceive::SetMinimumPlayoutDelay(int delay_ms) {
+  RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
+  // Limit to range accepted by both VoE and ACM, so we're at least getting as
+  // close as possible, instead of failing.
+  delay_ms = rtc::SafeClamp(delay_ms, 0, 10000);
+  if ((delay_ms < kVoiceEngineMinMinPlayoutDelayMs) ||
+      (delay_ms > kVoiceEngineMaxMinPlayoutDelayMs)) {
     RTC_DLOG(LS_ERROR) << "SetMinimumPlayoutDelay() invalid min delay";
-    return -1;
+    return;
   }
-  if (audio_coding_->SetMinimumPlayoutDelay(delayMs) != 0) {
+  if (audio_coding_->SetMinimumPlayoutDelay(delay_ms) != 0) {
     RTC_DLOG(LS_ERROR)
         << "SetMinimumPlayoutDelay() failed to set min playout delay";
-    return -1;
   }
-  return 0;
 }
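
rtc::SafeClamp (from rtc_base/numerics/safe_minmax.h, previously pulled in by the proxy) simply pins the value to the given range, so out-of-range requests degrade gracefully instead of being rejected:

int d = rtc::SafeClamp(-5, 0, 10000);     // 0
d = rtc::SafeClamp(250, 0, 10000);        // 250
d = rtc::SafeClamp(20000, 0, 10000);      // 10000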
 
-int ChannelReceive::GetPlayoutTimestamp(unsigned int& timestamp) {
-  uint32_t playout_timestamp_rtp = 0;
+uint32_t ChannelReceive::GetPlayoutTimestamp() const {
+  RTC_DCHECK_RUNS_SERIALIZED(&video_capture_thread_race_checker_);
   {
     rtc::CritScope lock(&video_sync_lock_);
-    playout_timestamp_rtp = playout_timestamp_rtp_;
+    return playout_timestamp_rtp_;
   }
-  if (playout_timestamp_rtp == 0) {
-    RTC_DLOG(LS_ERROR) << "GetPlayoutTimestamp() failed to retrieve timestamp";
-    return -1;
-  }
-  timestamp = playout_timestamp_rtp;
-  return 0;
 }
 
 absl::optional<Syncable::Info> ChannelReceive::GetSyncInfo() const {
+  RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
   Syncable::Info info;
   if (_rtpRtcpModule->RemoteNTP(&info.capture_time_ntp_secs,
                                 &info.capture_time_ntp_frac, nullptr, nullptr,
@@ -757,6 +931,14 @@
 }
 
 int64_t ChannelReceive::GetRTT() const {
+  if (media_transport_) {
+    auto target_rate = media_transport_->GetLatestTargetTransferRate();
+    if (target_rate.has_value()) {
+      return target_rate->network_estimate.round_trip_time.ms();
+    }
+
+    return 0;
+  }
   RtcpMode method = _rtpRtcpModule->RTCP();
   if (method == RtcpMode::kOff) {
     return 0;
@@ -788,5 +970,29 @@
   return rtt;
 }
 
+}  // namespace
+
+std::unique_ptr<ChannelReceiveInterface> CreateChannelReceive(
+    ProcessThread* module_process_thread,
+    AudioDeviceModule* audio_device_module,
+    MediaTransportInterface* media_transport,
+    Transport* rtcp_send_transport,
+    RtcEventLog* rtc_event_log,
+    uint32_t remote_ssrc,
+    size_t jitter_buffer_max_packets,
+    bool jitter_buffer_fast_playout,
+    int jitter_buffer_min_delay_ms,
+    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
+    absl::optional<AudioCodecPairId> codec_pair_id,
+    rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
+    const webrtc::CryptoOptions& crypto_options) {
+  return absl::make_unique<ChannelReceive>(
+      module_process_thread, audio_device_module, media_transport,
+      rtcp_send_transport, rtc_event_log, remote_ssrc,
+      jitter_buffer_max_packets, jitter_buffer_fast_playout,
+      jitter_buffer_min_delay_ms, decoder_factory, codec_pair_id,
+      frame_decryptor, crypto_options);
+}
+
 }  // namespace voe
 }  // namespace webrtc
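
A hypothetical call site for the new factory; every argument value below is a placeholder (in practice they come from the receive stream's configuration), and only the parameter list itself is taken from this change:

std::unique_ptr<voe::ChannelReceiveInterface> channel_receive =
    voe::CreateChannelReceive(
        module_process_thread, audio_device_module,
        /*media_transport=*/nullptr, rtcp_send_transport, event_log,
        /*remote_ssrc=*/0x12345678, /*jitter_buffer_max_packets=*/200,
        /*jitter_buffer_fast_playout=*/false,
        /*jitter_buffer_min_delay_ms=*/0, decoder_factory,
        /*codec_pair_id=*/absl::nullopt, /*frame_decryptor=*/nullptr,
        webrtc::CryptoOptions());
channel_receive->SetReceiveCodecs(codecs);
channel_receive->StartPlayout();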
diff --git a/audio/channel_receive.h b/audio/channel_receive.h
index 0c50962..9027623 100644
--- a/audio/channel_receive.h
+++ b/audio/channel_receive.h
@@ -17,24 +17,18 @@
 
 #include "absl/types/optional.h"
 #include "api/audio/audio_mixer.h"
+#include "api/audio_codecs/audio_decoder_factory.h"
 #include "api/call/audio_sink.h"
 #include "api/call/transport.h"
 #include "api/crypto/cryptooptions.h"
 #include "api/media_transport_interface.h"
 #include "api/rtpreceiverinterface.h"
-#include "audio/audio_level.h"
+#include "call/rtp_packet_sink_interface.h"
 #include "call/syncable.h"
-#include "common_types.h"  // NOLINT(build/include)
 #include "modules/audio_coding/include/audio_coding_module.h"
-#include "modules/rtp_rtcp/include/remote_ntp_time_estimator.h"
-#include "modules/rtp_rtcp/include/rtp_header_parser.h"
-#include "modules/rtp_rtcp/include/rtp_rtcp.h"
-#include "modules/rtp_rtcp/source/contributing_sources.h"
-#include "rtc_base/criticalsection.h"
-#include "rtc_base/thread_checker.h"
 
 // TODO(solenberg, nisse): This file contains a few NOLINT marks, to silence
-// warnings about use of unsigned short, and non-const reference arguments.
+// warnings about use of unsigned short.
 // These need cleanup, in a separate cl.
 
 namespace rtc {
@@ -68,210 +62,85 @@
 
 namespace voe {
 
-class ChannelSend;
+class ChannelSendInterface;
 
-// Helper class to simplify locking scheme for members that are accessed from
-// multiple threads.
-// Example: a member can be set on thread T1 and read by an internal audio
-// thread T2. Accessing the member via this class ensures that we are
-// safe and also avoid TSan v2 warnings.
-class ChannelReceiveState {
+// Interface class needed for AudioReceiveStream tests that use a
+// MockChannelReceive.
+
+class ChannelReceiveInterface : public RtpPacketSinkInterface {
  public:
-  struct State {
-    bool playing = false;
-  };
+  virtual ~ChannelReceiveInterface() = default;
 
-  ChannelReceiveState() {}
-  virtual ~ChannelReceiveState() {}
+  virtual void SetSink(AudioSinkInterface* sink) = 0;
 
-  void Reset() {
-    rtc::CritScope lock(&lock_);
-    state_ = State();
-  }
+  virtual void SetReceiveCodecs(
+      const std::map<int, SdpAudioFormat>& codecs) = 0;
 
-  State Get() const {
-    rtc::CritScope lock(&lock_);
-    return state_;
-  }
+  virtual void StartPlayout() = 0;
+  virtual void StopPlayout() = 0;
 
-  void SetPlaying(bool enable) {
-    rtc::CritScope lock(&lock_);
-    state_.playing = enable;
-  }
+  virtual bool GetRecCodec(CodecInst* codec) const = 0;
 
- private:
-  rtc::CriticalSection lock_;
-  State state_;
-};
+  virtual bool ReceivedRTCPPacket(const uint8_t* data, size_t length) = 0;
 
-class ChannelReceive : public RtpData, public MediaTransportAudioSinkInterface {
- public:
-  // Used for receive streams.
-  ChannelReceive(ProcessThread* module_process_thread,
-                 AudioDeviceModule* audio_device_module,
-                 MediaTransportInterface* media_transport,
-                 Transport* rtcp_send_transport,
-                 RtcEventLog* rtc_event_log,
-                 uint32_t remote_ssrc,
-                 size_t jitter_buffer_max_packets,
-                 bool jitter_buffer_fast_playout,
-                 rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
-                 absl::optional<AudioCodecPairId> codec_pair_id,
-                 rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
-                 const webrtc::CryptoOptions& crypto_options);
-  virtual ~ChannelReceive();
-
-  void SetSink(AudioSinkInterface* sink);
-
-  void SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs);
-
-  // API methods
-
-  // VoEBase
-  int32_t StartPlayout();
-  int32_t StopPlayout();
-
-  // Codecs
-  int32_t GetRecCodec(CodecInst& codec);  // NOLINT
-
-  // TODO(nisse, solenberg): Delete when VoENetwork is deleted.
-  int32_t ReceivedRTCPPacket(const uint8_t* data, size_t length);
-  void OnRtpPacket(const RtpPacketReceived& packet);
-
-  // Muting, Volume and Level.
-  void SetChannelOutputVolumeScaling(float scaling);
-  int GetSpeechOutputLevelFullRange() const;
+  virtual void SetChannelOutputVolumeScaling(float scaling) = 0;
+  virtual int GetSpeechOutputLevelFullRange() const = 0;
   // See description of "totalAudioEnergy" in the WebRTC stats spec:
   // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
-  double GetTotalOutputEnergy() const;
-  double GetTotalOutputDuration() const;
+  virtual double GetTotalOutputEnergy() const = 0;
+  virtual double GetTotalOutputDuration() const = 0;
 
   // Stats.
-  int GetNetworkStatistics(NetworkStatistics& stats);  // NOLINT
-  void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const;
+  virtual NetworkStatistics GetNetworkStatistics() const = 0;
+  virtual AudioDecodingCallStats GetDecodingCallStatistics() const = 0;
 
   // Audio+Video Sync.
-  uint32_t GetDelayEstimate() const;
-  int SetMinimumPlayoutDelay(int delayMs);
-  int GetPlayoutTimestamp(unsigned int& timestamp);  // NOLINT
+  virtual uint32_t GetDelayEstimate() const = 0;
+  virtual void SetMinimumPlayoutDelay(int delay_ms) = 0;
+  virtual uint32_t GetPlayoutTimestamp() const = 0;
 
   // Produces the transport-related timestamps; current_delay_ms is left unset.
-  absl::optional<Syncable::Info> GetSyncInfo() const;
+  virtual absl::optional<Syncable::Info> GetSyncInfo() const = 0;
 
   // RTP+RTCP
-  int SetLocalSSRC(unsigned int ssrc);
+  virtual void SetLocalSSRC(uint32_t ssrc) = 0;
 
-  void RegisterReceiverCongestionControlObjects(PacketRouter* packet_router);
-  void ResetReceiverCongestionControlObjects();
+  virtual void RegisterReceiverCongestionControlObjects(
+      PacketRouter* packet_router) = 0;
+  virtual void ResetReceiverCongestionControlObjects() = 0;
 
-  int GetRTPStatistics(CallReceiveStatistics& stats);  // NOLINT
-  void SetNACKStatus(bool enable, int maxNumberOfPackets);
+  virtual CallReceiveStatistics GetRTCPStatistics() const = 0;
+  virtual void SetNACKStatus(bool enable, int max_packets) = 0;
 
-  // MediaTransportAudioSinkInterface override;
-  void OnData(uint64_t channel_id,
-              MediaTransportEncodedAudioFrame frame) override;
-
-  // From RtpData in the RTP/RTCP module
-  int32_t OnReceivedPayloadData(const uint8_t* payloadData,
-                                size_t payloadSize,
-                                const WebRtcRTPHeader* rtpHeader) override;
-
-  // From AudioMixer::Source.
-  AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
+  virtual AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
       int sample_rate_hz,
-      AudioFrame* audio_frame);
+      AudioFrame* audio_frame) = 0;
 
-  int PreferredSampleRate() const;
+  virtual int PreferredSampleRate() const = 0;
 
   // Associate to a send channel.
   // Used for obtaining RTT for a receive-only channel.
-  void SetAssociatedSendChannel(ChannelSend* channel);
+  virtual void SetAssociatedSendChannel(
+      const ChannelSendInterface* channel) = 0;
 
-  std::vector<RtpSource> GetSources() const;
-
- private:
-  void Init();
-  void Terminate();
-
-  int GetRemoteSSRC(unsigned int& ssrc);  // NOLINT
-
-  bool ReceivePacket(const uint8_t* packet,
-                     size_t packet_length,
-                     const RTPHeader& header);
-  int ResendPackets(const uint16_t* sequence_numbers, int length);
-  void UpdatePlayoutTimestamp(bool rtcp);
-
-  int GetRtpTimestampRateHz() const;
-  int64_t GetRTT() const;
-
-  rtc::CriticalSection _callbackCritSect;
-  rtc::CriticalSection volume_settings_critsect_;
-
-  ChannelReceiveState channel_state_;
-
-  RtcEventLog* const event_log_;
-
-  // Indexed by payload type.
-  std::map<uint8_t, int> payload_type_frequencies_;
-
-  std::unique_ptr<ReceiveStatistics> rtp_receive_statistics_;
-  std::unique_ptr<RtpRtcp> _rtpRtcpModule;
-  const uint32_t remote_ssrc_;
-
-  // Info for GetSources and GetSyncInfo is updated on network or worker thread,
-  // queried on the worker thread.
-  rtc::CriticalSection rtp_sources_lock_;
-  ContributingSources contributing_sources_ RTC_GUARDED_BY(&rtp_sources_lock_);
-  absl::optional<uint32_t> last_received_rtp_timestamp_
-      RTC_GUARDED_BY(&rtp_sources_lock_);
-  absl::optional<int64_t> last_received_rtp_system_time_ms_
-      RTC_GUARDED_BY(&rtp_sources_lock_);
-  absl::optional<uint8_t> last_received_rtp_audio_level_
-      RTC_GUARDED_BY(&rtp_sources_lock_);
-
-  std::unique_ptr<AudioCodingModule> audio_coding_;
-  AudioSinkInterface* audio_sink_ = nullptr;
-  AudioLevel _outputAudioLevel;
-
-  RemoteNtpTimeEstimator ntp_estimator_ RTC_GUARDED_BY(ts_stats_lock_);
-
-  // Timestamp of the audio pulled from NetEq.
-  absl::optional<uint32_t> jitter_buffer_playout_timestamp_;
-
-  rtc::CriticalSection video_sync_lock_;
-  uint32_t playout_timestamp_rtp_ RTC_GUARDED_BY(video_sync_lock_);
-  uint32_t playout_delay_ms_ RTC_GUARDED_BY(video_sync_lock_);
-
-  rtc::CriticalSection ts_stats_lock_;
-
-  std::unique_ptr<rtc::TimestampWrapAroundHandler> rtp_ts_wraparound_handler_;
-  // The rtp timestamp of the first played out audio frame.
-  int64_t capture_start_rtp_time_stamp_;
-  // The capture ntp time (in local timebase) of the first played out audio
-  // frame.
-  int64_t capture_start_ntp_time_ms_ RTC_GUARDED_BY(ts_stats_lock_);
-
-  // uses
-  ProcessThread* _moduleProcessThreadPtr;
-  AudioDeviceModule* _audioDeviceModulePtr;
-  float _outputGain RTC_GUARDED_BY(volume_settings_critsect_);
-
-  // An associated send channel.
-  rtc::CriticalSection assoc_send_channel_lock_;
-  ChannelSend* associated_send_channel_
-      RTC_GUARDED_BY(assoc_send_channel_lock_);
-
-  PacketRouter* packet_router_ = nullptr;
-
-  rtc::ThreadChecker construction_thread_;
-
-  MediaTransportInterface* const media_transport_;
-
-  // E2EE Audio Frame Decryption
-  rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor_;
-  webrtc::CryptoOptions crypto_options_;
+  virtual std::vector<RtpSource> GetSources() const = 0;
 };
 
+std::unique_ptr<ChannelReceiveInterface> CreateChannelReceive(
+    ProcessThread* module_process_thread,
+    AudioDeviceModule* audio_device_module,
+    MediaTransportInterface* media_transport,
+    Transport* rtcp_send_transport,
+    RtcEventLog* rtc_event_log,
+    uint32_t remote_ssrc,
+    size_t jitter_buffer_max_packets,
+    bool jitter_buffer_fast_playout,
+    int jitter_buffer_min_delay_ms,
+    rtc::scoped_refptr<AudioDecoderFactory> decoder_factory,
+    absl::optional<AudioCodecPairId> codec_pair_id,
+    rtc::scoped_refptr<FrameDecryptorInterface> frame_decryptor,
+    const webrtc::CryptoOptions& crypto_options);
+
 }  // namespace voe
 }  // namespace webrtc
 
diff --git a/audio/channel_receive_proxy.cc b/audio/channel_receive_proxy.cc
deleted file mode 100644
index 1dee640..0000000
--- a/audio/channel_receive_proxy.cc
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "audio/channel_receive_proxy.h"
-
-#include <utility>
-
-#include "api/call/audio_sink.h"
-#include "audio/channel_send_proxy.h"
-#include "rtc_base/checks.h"
-#include "rtc_base/logging.h"
-#include "rtc_base/numerics/safe_minmax.h"
-
-namespace webrtc {
-namespace voe {
-ChannelReceiveProxy::ChannelReceiveProxy() {}
-
-ChannelReceiveProxy::ChannelReceiveProxy(
-    std::unique_ptr<ChannelReceive> channel)
-    : channel_(std::move(channel)) {
-  RTC_DCHECK(channel_);
-  module_process_thread_checker_.DetachFromThread();
-}
-
-ChannelReceiveProxy::~ChannelReceiveProxy() {}
-
-void ChannelReceiveProxy::SetLocalSSRC(uint32_t ssrc) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  int error = channel_->SetLocalSSRC(ssrc);
-  RTC_DCHECK_EQ(0, error);
-}
-
-void ChannelReceiveProxy::SetNACKStatus(bool enable, int max_packets) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetNACKStatus(enable, max_packets);
-}
-
-CallReceiveStatistics ChannelReceiveProxy::GetRTCPStatistics() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  CallReceiveStatistics stats = {0};
-  int error = channel_->GetRTPStatistics(stats);
-  RTC_DCHECK_EQ(0, error);
-  return stats;
-}
-
-bool ChannelReceiveProxy::ReceivedRTCPPacket(const uint8_t* packet,
-                                             size_t length) {
-  // May be called on either worker thread or network thread.
-  return channel_->ReceivedRTCPPacket(packet, length) == 0;
-}
-
-void ChannelReceiveProxy::RegisterReceiverCongestionControlObjects(
-    PacketRouter* packet_router) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->RegisterReceiverCongestionControlObjects(packet_router);
-}
-
-void ChannelReceiveProxy::ResetReceiverCongestionControlObjects() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->ResetReceiverCongestionControlObjects();
-}
-
-NetworkStatistics ChannelReceiveProxy::GetNetworkStatistics() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  NetworkStatistics stats = {0};
-  int error = channel_->GetNetworkStatistics(stats);
-  RTC_DCHECK_EQ(0, error);
-  return stats;
-}
-
-AudioDecodingCallStats ChannelReceiveProxy::GetDecodingCallStatistics() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  AudioDecodingCallStats stats;
-  channel_->GetDecodingCallStatistics(&stats);
-  return stats;
-}
-
-int ChannelReceiveProxy::GetSpeechOutputLevelFullRange() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->GetSpeechOutputLevelFullRange();
-}
-
-double ChannelReceiveProxy::GetTotalOutputEnergy() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->GetTotalOutputEnergy();
-}
-
-double ChannelReceiveProxy::GetTotalOutputDuration() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->GetTotalOutputDuration();
-}
-
-uint32_t ChannelReceiveProxy::GetDelayEstimate() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread() ||
-             module_process_thread_checker_.CalledOnValidThread());
-  return channel_->GetDelayEstimate();
-}
-
-void ChannelReceiveProxy::SetReceiveCodecs(
-    const std::map<int, SdpAudioFormat>& codecs) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetReceiveCodecs(codecs);
-}
-
-void ChannelReceiveProxy::SetSink(AudioSinkInterface* sink) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetSink(sink);
-}
-
-void ChannelReceiveProxy::OnRtpPacket(const RtpPacketReceived& packet) {
-  // May be called on either worker thread or network thread.
-  channel_->OnRtpPacket(packet);
-}
-
-void ChannelReceiveProxy::SetChannelOutputVolumeScaling(float scaling) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetChannelOutputVolumeScaling(scaling);
-}
-
-AudioMixer::Source::AudioFrameInfo ChannelReceiveProxy::GetAudioFrameWithInfo(
-    int sample_rate_hz,
-    AudioFrame* audio_frame) {
-  RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
-  return channel_->GetAudioFrameWithInfo(sample_rate_hz, audio_frame);
-}
-
-int ChannelReceiveProxy::PreferredSampleRate() const {
-  RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
-  return channel_->PreferredSampleRate();
-}
-
-void ChannelReceiveProxy::AssociateSendChannel(
-    const ChannelSendProxy& send_channel_proxy) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetAssociatedSendChannel(send_channel_proxy.GetChannel());
-}
-
-void ChannelReceiveProxy::DisassociateSendChannel() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetAssociatedSendChannel(nullptr);
-}
-
-absl::optional<Syncable::Info> ChannelReceiveProxy::GetSyncInfo() const {
-  RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
-  return channel_->GetSyncInfo();
-}
-
-uint32_t ChannelReceiveProxy::GetPlayoutTimestamp() const {
-  RTC_DCHECK_RUNS_SERIALIZED(&video_capture_thread_race_checker_);
-  unsigned int timestamp = 0;
-  int error = channel_->GetPlayoutTimestamp(timestamp);
-  RTC_DCHECK(!error || timestamp == 0);
-  return timestamp;
-}
-
-void ChannelReceiveProxy::SetMinimumPlayoutDelay(int delay_ms) {
-  RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
-  // Limit to range accepted by both VoE and ACM, so we're at least getting as
-  // close as possible, instead of failing.
-  delay_ms = rtc::SafeClamp(delay_ms, 0, 10000);
-  int error = channel_->SetMinimumPlayoutDelay(delay_ms);
-  if (0 != error) {
-    RTC_LOG(LS_WARNING) << "Error setting minimum playout delay.";
-  }
-}
-
-bool ChannelReceiveProxy::GetRecCodec(CodecInst* codec_inst) const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->GetRecCodec(*codec_inst) == 0;
-}
-
-std::vector<RtpSource> ChannelReceiveProxy::GetSources() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->GetSources();
-}
-
-void ChannelReceiveProxy::StartPlayout() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  int error = channel_->StartPlayout();
-  RTC_DCHECK_EQ(0, error);
-}
-
-void ChannelReceiveProxy::StopPlayout() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  int error = channel_->StopPlayout();
-  RTC_DCHECK_EQ(0, error);
-}
-}  // namespace voe
-}  // namespace webrtc
diff --git a/audio/channel_receive_proxy.h b/audio/channel_receive_proxy.h
deleted file mode 100644
index 8ebacc3..0000000
--- a/audio/channel_receive_proxy.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef AUDIO_CHANNEL_RECEIVE_PROXY_H_
-#define AUDIO_CHANNEL_RECEIVE_PROXY_H_
-
-#include <map>
-#include <memory>
-#include <vector>
-
-#include "api/audio/audio_mixer.h"
-#include "api/rtpreceiverinterface.h"
-#include "audio/channel_receive.h"
-#include "call/rtp_packet_sink_interface.h"
-#include "rtc_base/constructormagic.h"
-#include "rtc_base/race_checker.h"
-#include "rtc_base/thread_checker.h"
-
-namespace webrtc {
-
-class AudioSinkInterface;
-class PacketRouter;
-class RtpPacketReceived;
-class Transport;
-
-namespace voe {
-
-class ChannelSendProxy;
-
-// This class provides the "view" of a voe::Channel that we need to implement
-// webrtc::AudioReceiveStream. It serves two purposes:
-//  1. Allow mocking just the interfaces used, instead of the entire
-//     voe::Channel class.
-//  2. Provide a refined interface for the stream classes, including assumptions
-//     on return values and input adaptation.
-class ChannelReceiveProxy : public RtpPacketSinkInterface {
- public:
-  ChannelReceiveProxy();
-  explicit ChannelReceiveProxy(std::unique_ptr<ChannelReceive> channel);
-  virtual ~ChannelReceiveProxy();
-
-  // Shared with ChannelSendProxy
-  virtual void SetLocalSSRC(uint32_t ssrc);
-  virtual void SetNACKStatus(bool enable, int max_packets);
-  virtual CallReceiveStatistics GetRTCPStatistics() const;
-  virtual bool ReceivedRTCPPacket(const uint8_t* packet, size_t length);
-
-  virtual void RegisterReceiverCongestionControlObjects(
-      PacketRouter* packet_router);
-  virtual void ResetReceiverCongestionControlObjects();
-  virtual NetworkStatistics GetNetworkStatistics() const;
-  virtual AudioDecodingCallStats GetDecodingCallStatistics() const;
-  virtual int GetSpeechOutputLevelFullRange() const;
-  // See description of "totalAudioEnergy" in the WebRTC stats spec:
-  // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats-totalaudioenergy
-  virtual double GetTotalOutputEnergy() const;
-  virtual double GetTotalOutputDuration() const;
-  virtual uint32_t GetDelayEstimate() const;
-  virtual void SetReceiveCodecs(const std::map<int, SdpAudioFormat>& codecs);
-  virtual void SetSink(AudioSinkInterface* sink);
-
-  // Implements RtpPacketSinkInterface
-  void OnRtpPacket(const RtpPacketReceived& packet) override;
-
-  virtual void SetChannelOutputVolumeScaling(float scaling);
-  virtual AudioMixer::Source::AudioFrameInfo GetAudioFrameWithInfo(
-      int sample_rate_hz,
-      AudioFrame* audio_frame);
-  virtual int PreferredSampleRate() const;
-  virtual void AssociateSendChannel(const ChannelSendProxy& send_channel_proxy);
-  virtual void DisassociateSendChannel();
-
-  // Produces the transport-related timestamps; current_delay_ms is left unset.
-  absl::optional<Syncable::Info> GetSyncInfo() const;
-  virtual uint32_t GetPlayoutTimestamp() const;
-  virtual void SetMinimumPlayoutDelay(int delay_ms);
-  virtual bool GetRecCodec(CodecInst* codec_inst) const;
-  virtual std::vector<webrtc::RtpSource> GetSources() const;
-  virtual void StartPlayout();
-  virtual void StopPlayout();
-
- private:
-  // Thread checkers document and lock usage of some methods on voe::Channel to
-  // specific threads we know about. The goal is to eventually split up
-  // voe::Channel into parts with single-threaded semantics, and thereby reduce
-  // the need for locks.
-  rtc::ThreadChecker worker_thread_checker_;
-  rtc::ThreadChecker module_process_thread_checker_;
-  // Methods accessed from audio and video threads are checked for sequential-
-  // only access. We don't necessarily own and control these threads, so thread
-  // checkers cannot be used. E.g. Chromium may transfer "ownership" from one
-  // audio thread to another, but access is still sequential.
-  rtc::RaceChecker audio_thread_race_checker_;
-  rtc::RaceChecker video_capture_thread_race_checker_;
-  std::unique_ptr<ChannelReceive> channel_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(ChannelReceiveProxy);
-};
-}  // namespace voe
-}  // namespace webrtc
-
-#endif  // AUDIO_CHANNEL_RECEIVE_PROXY_H_
diff --git a/audio/channel_send.cc b/audio/channel_send.cc
index c0de939..c458fe4 100644
--- a/audio/channel_send.cc
+++ b/audio/channel_send.cc
@@ -19,12 +19,15 @@
 
 #include "absl/memory/memory.h"
 #include "api/array_view.h"
+#include "api/call/transport.h"
 #include "api/crypto/frameencryptorinterface.h"
 #include "audio/utility/audio_frame_operations.h"
 #include "call/rtp_transport_controller_send_interface.h"
 #include "logging/rtc_event_log/events/rtc_event_audio_playout.h"
 #include "logging/rtc_event_log/rtc_event_log.h"
 #include "modules/audio_coding/audio_network_adaptor/include/audio_network_adaptor_config.h"
+#include "modules/audio_coding/include/audio_coding_module.h"
+#include "modules/audio_processing/rms_level.h"
 #include "modules/pacing/packet_router.h"
 #include "modules/utility/include/process_thread.h"
 #include "rtc_base/checks.h"
@@ -33,6 +36,8 @@
 #include "rtc_base/format_macros.h"
 #include "rtc_base/location.h"
 #include "rtc_base/logging.h"
+#include "rtc_base/numerics/safe_conversions.h"
+#include "rtc_base/race_checker.h"
 #include "rtc_base/rate_limiter.h"
 #include "rtc_base/task_queue.h"
 #include "rtc_base/thread_checker.h"
@@ -66,7 +71,241 @@
   }
 }
 
-}  // namespace
+class RtpPacketSenderProxy;
+class TransportFeedbackProxy;
+class TransportSequenceNumberProxy;
+class VoERtcpObserver;
+
+class ChannelSend
+    : public ChannelSendInterface,
+      public Transport,
+      public OverheadObserver,
+      public AudioPacketizationCallback,  // receive encoded packets from the
+                                          // ACM
+      public TargetTransferRateObserver {
+ public:
+  // TODO(nisse): Make OnUplinkPacketLossRate public, and delete friend
+  // declaration.
+  friend class VoERtcpObserver;
+
+  ChannelSend(rtc::TaskQueue* encoder_queue,
+              ProcessThread* module_process_thread,
+              MediaTransportInterface* media_transport,
+              Transport* rtp_transport,
+              RtcpRttStats* rtcp_rtt_stats,
+              RtcEventLog* rtc_event_log,
+              FrameEncryptorInterface* frame_encryptor,
+              const webrtc::CryptoOptions& crypto_options,
+              bool extmap_allow_mixed,
+              int rtcp_report_interval_ms);
+
+  ~ChannelSend() override;
+
+  // Send using this encoder, with this payload type.
+  bool SetEncoder(int payload_type,
+                  std::unique_ptr<AudioEncoder> encoder) override;
+  void ModifyEncoder(rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)>
+                         modifier) override;
+
+  // API methods
+  void StartSend() override;
+  void StopSend() override;
+
+  // Codecs
+  void OnBitrateAllocation(BitrateAllocationUpdate update) override;
+  int GetBitrate() const override;
+
+  // Network
+  bool ReceivedRTCPPacket(const uint8_t* data, size_t length) override;
+
+  // Muting, Volume and Level.
+  void SetInputMute(bool enable) override;
+
+  // Stats.
+  ANAStats GetANAStatistics() const override;
+
+  // Used by AudioSendStream.
+  RtpRtcp* GetRtpRtcp() const override;
+
+  // DTMF.
+  bool SendTelephoneEventOutband(int event, int duration_ms) override;
+  bool SetSendTelephoneEventPayloadType(int payload_type,
+                                        int payload_frequency) override;
+
+  // RTP+RTCP
+  void SetLocalSSRC(uint32_t ssrc) override;
+  void SetMid(const std::string& mid, int extension_id) override;
+  void SetExtmapAllowMixed(bool extmap_allow_mixed) override;
+  void SetSendAudioLevelIndicationStatus(bool enable, int id) override;
+  void EnableSendTransportSequenceNumber(int id) override;
+
+  void RegisterSenderCongestionControlObjects(
+      RtpTransportControllerSendInterface* transport,
+      RtcpBandwidthObserver* bandwidth_observer) override;
+  void ResetSenderCongestionControlObjects() override;
+  void SetRTCP_CNAME(absl::string_view c_name) override;
+  std::vector<ReportBlock> GetRemoteRTCPReportBlocks() const override;
+  CallSendStatistics GetRTCPStatistics() const override;
+
+  // ProcessAndEncodeAudio() posts a task on the shared encoder task queue,
+  // which in turn calls (on the queue) ProcessAndEncodeAudioOnTaskQueue() where
+  // the actual processing of the audio takes place. The processing mainly
+  // consists of encoding and preparing the result for sending by adding it to a
+  // send queue.
+  // The main reason for using a task queue here is to release the native,
+  // OS-specific, audio capture thread as soon as possible to ensure that it
+  // can go back to sleep and be prepared to deliver a new captured audio
+  // packet.
+  void ProcessAndEncodeAudio(std::unique_ptr<AudioFrame> audio_frame) override;
+
+  void SetTransportOverhead(size_t transport_overhead_per_packet) override;
+
+  // The existence of this function alongside OnUplinkPacketLossRate is
+  // a compromise. We want the encoder to be agnostic of the PLR source, but
+  // we also don't want it to receive conflicting information from TWCC and
+  // from RTCP-XR.
+  void OnTwccBasedUplinkPacketLossRate(float packet_loss_rate) override;
+
+  void OnRecoverableUplinkPacketLossRate(
+      float recoverable_packet_loss_rate) override;
+
+  int64_t GetRTT() const override;
+
+  // E2EE Custom Audio Frame Encryption
+  void SetFrameEncryptor(
+      rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor) override;
+
+ private:
+  class ProcessAndEncodeAudioTask;
+
+  // From AudioPacketizationCallback in the ACM
+  int32_t SendData(FrameType frameType,
+                   uint8_t payloadType,
+                   uint32_t timeStamp,
+                   const uint8_t* payloadData,
+                   size_t payloadSize,
+                   const RTPFragmentationHeader* fragmentation) override;
+
+  // From Transport (called by the RTP/RTCP module)
+  bool SendRtp(const uint8_t* data,
+               size_t len,
+               const PacketOptions& packet_options) override;
+  bool SendRtcp(const uint8_t* data, size_t len) override;
+
+  // From OverheadObserver in the RTP/RTCP module
+  void OnOverheadChanged(size_t overhead_bytes_per_packet) override;
+
+  void OnUplinkPacketLossRate(float packet_loss_rate);
+  bool InputMute() const;
+
+  int SetSendRtpHeaderExtension(bool enable, RTPExtensionType type, int id);
+
+  void UpdateOverheadForEncoder()
+      RTC_EXCLUSIVE_LOCKS_REQUIRED(overhead_per_packet_lock_);
+
+  int32_t SendRtpAudio(FrameType frameType,
+                       uint8_t payloadType,
+                       uint32_t timeStamp,
+                       rtc::ArrayView<const uint8_t> payload,
+                       const RTPFragmentationHeader* fragmentation);
+
+  int32_t SendMediaTransportAudio(FrameType frameType,
+                                  uint8_t payloadType,
+                                  uint32_t timeStamp,
+                                  rtc::ArrayView<const uint8_t> payload,
+                                  const RTPFragmentationHeader* fragmentation);
+
+  // Return media transport or nullptr if using RTP.
+  MediaTransportInterface* media_transport() { return media_transport_; }
+
+  // Called on the encoder task queue when a new input audio frame is ready
+  // for encoding.
+  void ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input);
+
+  void OnReceivedRtt(int64_t rtt_ms);
+
+  void OnTargetTransferRate(TargetTransferRate) override;
+
+  // Thread checkers document and lock usage of some methods on voe::Channel to
+  // specific threads we know about. The goal is to eventually split up
+  // voe::Channel into parts with single-threaded semantics, and thereby reduce
+  // the need for locks.
+  rtc::ThreadChecker worker_thread_checker_;
+  rtc::ThreadChecker module_process_thread_checker_;
+  // Methods accessed from audio and video threads are checked for sequential-
+  // only access. We don't necessarily own and control these threads, so thread
+  // checkers cannot be used. E.g. Chromium may transfer "ownership" from one
+  // audio thread to another, but access is still sequential.
+  rtc::RaceChecker audio_thread_race_checker_;
+
+  rtc::CriticalSection _callbackCritSect;
+  rtc::CriticalSection volume_settings_critsect_;
+
+  bool sending_ RTC_GUARDED_BY(&worker_thread_checker_) = false;
+
+  RtcEventLog* const event_log_;
+
+  std::unique_ptr<RtpRtcp> _rtpRtcpModule;
+
+  std::unique_ptr<AudioCodingModule> audio_coding_;
+  uint32_t _timeStamp RTC_GUARDED_BY(encoder_queue_);
+
+  uint16_t send_sequence_number_;
+
+  // Externally owned dependencies used by this channel.
+  ProcessThread* const _moduleProcessThreadPtr;
+  Transport* const _transportPtr;  // WebRtc socket or external transport
+  RmsLevel rms_level_ RTC_GUARDED_BY(encoder_queue_);
+  bool input_mute_ RTC_GUARDED_BY(volume_settings_critsect_);
+  bool previous_frame_muted_ RTC_GUARDED_BY(encoder_queue_);
+  // VoeRTP_RTCP
+  // TODO(henrika): can today be accessed on the main thread and on the
+  // task queue; hence potential race.
+  bool _includeAudioLevelIndication;
+  size_t transport_overhead_per_packet_
+      RTC_GUARDED_BY(overhead_per_packet_lock_);
+  size_t rtp_overhead_per_packet_ RTC_GUARDED_BY(overhead_per_packet_lock_);
+  rtc::CriticalSection overhead_per_packet_lock_;
+  // RtcpBandwidthObserver
+  const std::unique_ptr<VoERtcpObserver> rtcp_observer_;
+
+  PacketRouter* packet_router_ RTC_GUARDED_BY(&worker_thread_checker_) =
+      nullptr;
+  const std::unique_ptr<TransportFeedbackProxy> feedback_observer_proxy_;
+  const std::unique_ptr<TransportSequenceNumberProxy> seq_num_allocator_proxy_;
+  const std::unique_ptr<RtpPacketSenderProxy> rtp_packet_sender_proxy_;
+  const std::unique_ptr<RateLimiter> retransmission_rate_limiter_;
+
+  rtc::ThreadChecker construction_thread_;
+
+  const bool use_twcc_plr_for_ana_;
+
+  rtc::CriticalSection encoder_queue_lock_;
+  bool encoder_queue_is_active_ RTC_GUARDED_BY(encoder_queue_lock_) = false;
+  rtc::TaskQueue* const encoder_queue_ = nullptr;
+
+  MediaTransportInterface* const media_transport_;
+  int media_transport_sequence_number_ RTC_GUARDED_BY(encoder_queue_) = 0;
+
+  rtc::CriticalSection media_transport_lock_;
+  // Currently set by SetLocalSSRC.
+  uint64_t media_transport_channel_id_ RTC_GUARDED_BY(&media_transport_lock_) =
+      0;
+  // Cache payload type and sampling frequency from most recent call to
+  // SetEncoder. Needed to set MediaTransportEncodedAudioFrame metadata, and
+  // invalidate on encoder change.
+  int media_transport_payload_type_ RTC_GUARDED_BY(&media_transport_lock_);
+  int media_transport_sampling_frequency_
+      RTC_GUARDED_BY(&media_transport_lock_);
+
+  // E2EE Audio Frame Encryption
+  rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor_;
+  // E2EE Frame Encryption Options
+  const webrtc::CryptoOptions crypto_options_;
+
+  rtc::CriticalSection bitrate_crit_section_;
+  int configured_bitrate_bps_ RTC_GUARDED_BY(bitrate_crit_section_) = 0;
+};
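
The ProcessAndEncodeAudio() comment above is the rationale for the encoder task queue. A self-contained illustration of that hand-off pattern follows; the function name and the std::vector payload are illustrative only (the real code posts a task carrying an AudioFrame):

#include <vector>

#include "rtc_base/task_queue.h"

// The OS capture thread only copies the 10 ms PCM block and posts it; the
// potentially slow encode work runs later on |encoder_queue|.
void HandOffForEncoding(rtc::TaskQueue* encoder_queue,
                        const std::vector<int16_t>& pcm_10ms) {
  encoder_queue->PostTask([pcm_10ms] {
    // Encode pcm_10ms here; by now the capture thread has already returned.
  });
}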
 
 const int kTelephoneEventAttenuationdB = 10;
 
@@ -441,26 +680,22 @@
   return true;
 }
 
-int ChannelSend::PreferredSampleRate() const {
-  // Return the bigger of playout and receive frequency in the ACM.
-  return std::max(audio_coding_->ReceiveFrequency(),
-                  audio_coding_->PlayoutFrequency());
-}
-
 ChannelSend::ChannelSend(rtc::TaskQueue* encoder_queue,
                          ProcessThread* module_process_thread,
                          MediaTransportInterface* media_transport,
+                         Transport* rtp_transport,
                          RtcpRttStats* rtcp_rtt_stats,
                          RtcEventLog* rtc_event_log,
                          FrameEncryptorInterface* frame_encryptor,
                          const webrtc::CryptoOptions& crypto_options,
-                         bool extmap_allow_mixed)
+                         bool extmap_allow_mixed,
+                         int rtcp_report_interval_ms)
     : event_log_(rtc_event_log),
       _timeStamp(0),  // This is just an offset, RTP module will add it's own
                       // random offset
       send_sequence_number_(0),
       _moduleProcessThreadPtr(module_process_thread),
-      _transportPtr(NULL),
+      _transportPtr(rtp_transport),
       input_mute_(false),
       previous_frame_muted_(false),
       _includeAudioLevelIndication(false),
@@ -480,47 +715,51 @@
       crypto_options_(crypto_options) {
   RTC_DCHECK(module_process_thread);
   RTC_DCHECK(encoder_queue);
+  module_process_thread_checker_.DetachFromThread();
+
   audio_coding_.reset(AudioCodingModule::Create(AudioCodingModule::Config()));
 
   RtpRtcp::Configuration configuration;
+
+  // We gradually remove codepaths that depend on RTP when using media
+  // transport. All of this logic should be moved to the future
+  // RTPMediaTransport. In this case it means that overhead and bandwidth
+  // observers should not be called when using media transport.
+  if (!media_transport_) {
+    configuration.overhead_observer = this;
+    configuration.bandwidth_callback = rtcp_observer_.get();
+    configuration.transport_feedback_callback = feedback_observer_proxy_.get();
+  }
+
   configuration.audio = true;
   configuration.outgoing_transport = this;
-  configuration.overhead_observer = this;
-  configuration.bandwidth_callback = rtcp_observer_.get();
 
   configuration.paced_sender = rtp_packet_sender_proxy_.get();
   configuration.transport_sequence_number_allocator =
       seq_num_allocator_proxy_.get();
-  configuration.transport_feedback_callback = feedback_observer_proxy_.get();
 
   configuration.event_log = event_log_;
   configuration.rtt_stats = rtcp_rtt_stats;
   configuration.retransmission_rate_limiter =
       retransmission_rate_limiter_.get();
   configuration.extmap_allow_mixed = extmap_allow_mixed;
+  configuration.rtcp_report_interval_ms = rtcp_report_interval_ms;
 
   _rtpRtcpModule.reset(RtpRtcp::CreateRtpRtcp(configuration));
   _rtpRtcpModule->SetSendingMediaStatus(false);
-  Init();
-}
 
-ChannelSend::~ChannelSend() {
-  Terminate();
-  RTC_DCHECK(!channel_state_.Get().sending);
-}
+  // We want to invoke the |TargetTransferRateObserver| and |OnOverheadChanged|
+  // callbacks after audio_coding_ is fully initialized.
+  if (media_transport_) {
+    RTC_DLOG(LS_INFO) << "Setting media_transport_ rate observers.";
+    media_transport_->AddTargetTransferRateObserver(this);
+    OnOverheadChanged(media_transport_->GetAudioPacketOverhead());
+  } else {
+    RTC_DLOG(LS_INFO) << "Not setting media_transport_ rate observers.";
+  }
 
-void ChannelSend::Init() {
-  channel_state_.Reset();
-
-  // --- Add modules to process thread (for periodic schedulation)
   _moduleProcessThreadPtr->RegisterModule(_rtpRtcpModule.get(), RTC_FROM_HERE);
 
-  // --- ACM initialization
-  int error = audio_coding_->InitializeReceiver();
-  RTC_DCHECK_EQ(0, error);
-
-  // --- RTP/RTCP module initialization
-
   // Ensure that RTCP is enabled by default for the created channel.
   // Note that, the module will keep generating RTCP until it is explicitly
   // disabled by the user.
@@ -529,36 +768,30 @@
   // RTCP is enabled by default.
   _rtpRtcpModule->SetRTCPStatus(RtcpMode::kCompound);
 
-  // --- Register all permanent callbacks
-  error = audio_coding_->RegisterTransportCallback(this);
+  int error = audio_coding_->RegisterTransportCallback(this);
   RTC_DCHECK_EQ(0, error);
 }
 
-void ChannelSend::Terminate() {
+ChannelSend::~ChannelSend() {
   RTC_DCHECK(construction_thread_.CalledOnValidThread());
-  // Must be called on the same thread as Init().
+
+  if (media_transport_) {
+    media_transport_->RemoveTargetTransferRateObserver(this);
+  }
 
   StopSend();
 
-  // The order to safely shutdown modules in a channel is:
-  // 1. De-register callbacks in modules
-  // 2. De-register modules in process thread
-  // 3. Destroy modules
   int error = audio_coding_->RegisterTransportCallback(NULL);
   RTC_DCHECK_EQ(0, error);
 
-  // De-register modules in process thread
   if (_moduleProcessThreadPtr)
     _moduleProcessThreadPtr->DeRegisterModule(_rtpRtcpModule.get());
-
-  // End of modules shutdown
 }
 
-int32_t ChannelSend::StartSend() {
-  if (channel_state_.Get().sending) {
-    return 0;
-  }
-  channel_state_.SetSending(true);
+void ChannelSend::StartSend() {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  RTC_DCHECK(!sending_);
+  sending_ = true;
 
   // Resume the previous sequence number which was reset by StopSend(). This
   // needs to be done before |sending| is set to true on the RTP/RTCP module.
@@ -566,26 +799,21 @@
     _rtpRtcpModule->SetSequenceNumber(send_sequence_number_);
   }
   _rtpRtcpModule->SetSendingMediaStatus(true);
-  if (_rtpRtcpModule->SetSendingStatus(true) != 0) {
-    RTC_DLOG(LS_ERROR) << "StartSend() RTP/RTCP failed to start sending";
-    _rtpRtcpModule->SetSendingMediaStatus(false);
-    rtc::CritScope cs(&_callbackCritSect);
-    channel_state_.SetSending(false);
-    return -1;
-  }
+  int ret = _rtpRtcpModule->SetSendingStatus(true);
+  RTC_DCHECK_EQ(0, ret);
   {
     // It is now OK to start posting tasks to the encoder task queue.
     rtc::CritScope cs(&encoder_queue_lock_);
     encoder_queue_is_active_ = true;
   }
-  return 0;
 }
 
 void ChannelSend::StopSend() {
-  if (!channel_state_.Get().sending) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  if (!sending_) {
     return;
   }
-  channel_state_.SetSending(false);
+  sending_ = false;
 
   // Post a task to the encoder thread which sets an event when the task is
   // executed. We know that no more encoding tasks will be added to the task
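
The comment above (continued in the unchanged code below this hunk) describes flushing the encoder queue with an event. A minimal standalone sketch of that flush pattern, assuming rtc::Event's (manual_reset, initially_signaled) constructor from rtc_base:

#include "rtc_base/event.h"
#include "rtc_base/task_queue.h"

// After this returns, every task posted to |queue| before the call has run.
void FlushTaskQueue(rtc::TaskQueue* queue) {
  rtc::Event done(/*manual_reset=*/false, /*initially_signaled=*/false);
  queue->PostTask([&done] { done.Set(); });
  done.Wait(rtc::Event::kForever);
}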
@@ -623,6 +851,7 @@
 
 bool ChannelSend::SetEncoder(int payload_type,
                              std::unique_ptr<AudioEncoder> encoder) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   RTC_DCHECK_GE(payload_type, 0);
   RTC_DCHECK_LE(payload_type, 127);
   // TODO(ossu): Make CodecInsts up, for now: one for the RTP/RTCP module and
@@ -666,24 +895,35 @@
 
 void ChannelSend::ModifyEncoder(
     rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   audio_coding_->ModifyEncoder(modifier);
 }
 
-void ChannelSend::SetBitRate(int bitrate_bps, int64_t probing_interval_ms) {
+void ChannelSend::OnBitrateAllocation(BitrateAllocationUpdate update) {
+  // This method can be called on the worker thread, module process thread
+  // or on a TaskQueue via VideoSendStreamImpl::OnEncoderConfigurationChanged.
+  // TODO(solenberg): Figure out a good way to check this or enforce calling
+  // rules.
+  // RTC_DCHECK(worker_thread_checker_.CalledOnValidThread() ||
+  //            module_process_thread_checker_.CalledOnValidThread());
+  rtc::CritScope lock(&bitrate_crit_section_);
+
   audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
     if (*encoder) {
-      (*encoder)->OnReceivedUplinkBandwidth(bitrate_bps, probing_interval_ms);
+      (*encoder)->OnReceivedUplinkAllocation(update);
     }
   });
-  retransmission_rate_limiter_->SetMaxRate(bitrate_bps);
-  configured_bitrate_bps_ = bitrate_bps;
+  retransmission_rate_limiter_->SetMaxRate(update.target_bitrate.bps());
+  configured_bitrate_bps_ = update.target_bitrate.bps();
 }
 
-int ChannelSend::GetBitRate() const {
+int ChannelSend::GetBitrate() const {
+  rtc::CritScope lock(&bitrate_crit_section_);
   return configured_bitrate_bps_;
 }
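
A hypothetical caller, assuming BitrateAllocationUpdate is default-constructible and carries the DataRate target_bitrate field used above (other fields left at their defaults):

BitrateAllocationUpdate update;
update.target_bitrate = DataRate::bps(32000);
channel_send->OnBitrateAllocation(update);         // Updates encoder and rate limiter.
RTC_DCHECK_EQ(32000, channel_send->GetBitrate());  // Reads under the same lock.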
 
 void ChannelSend::OnTwccBasedUplinkPacketLossRate(float packet_loss_rate) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   if (!use_twcc_plr_for_ana_)
     return;
   audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
@@ -695,6 +935,7 @@
 
 void ChannelSend::OnRecoverableUplinkPacketLossRate(
     float recoverable_packet_loss_rate) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
     if (*encoder) {
       (*encoder)->OnReceivedUplinkRecoverablePacketLossFraction(
@@ -713,47 +954,22 @@
   });
 }
 
-bool ChannelSend::EnableAudioNetworkAdaptor(const std::string& config_string) {
-  bool success = false;
-  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
-    if (*encoder) {
-      success =
-          (*encoder)->EnableAudioNetworkAdaptor(config_string, event_log_);
-    }
-  });
-  return success;
-}
+// TODO(nisse): Delete always-true return value.
+bool ChannelSend::ReceivedRTCPPacket(const uint8_t* data, size_t length) {
+  // May be called on either worker thread or network thread.
+  if (media_transport_) {
+    // Ignore RTCP packets while media transport is used.
+    // Those packets should not arrive, but we are seeing occasional packets.
+    return 0;
+  }
 
-void ChannelSend::DisableAudioNetworkAdaptor() {
-  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
-    if (*encoder)
-      (*encoder)->DisableAudioNetworkAdaptor();
-  });
-}
-
-void ChannelSend::SetReceiverFrameLengthRange(int min_frame_length_ms,
-                                              int max_frame_length_ms) {
-  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
-    if (*encoder) {
-      (*encoder)->SetReceiverFrameLengthRange(min_frame_length_ms,
-                                              max_frame_length_ms);
-    }
-  });
-}
-
-void ChannelSend::RegisterTransport(Transport* transport) {
-  rtc::CritScope cs(&_callbackCritSect);
-  _transportPtr = transport;
-}
-
-int32_t ChannelSend::ReceivedRTCPPacket(const uint8_t* data, size_t length) {
   // Deliver RTCP packet to RTP/RTCP module for parsing
   _rtpRtcpModule->IncomingRtcpPacket(data, length);
 
   int64_t rtt = GetRTT();
   if (rtt == 0) {
     // Waiting for valid RTT.
-    return 0;
+    return true;
   }
 
   int64_t nack_window_ms = rtt;
@@ -764,16 +980,12 @@
   }
   retransmission_rate_limiter_->SetWindowSize(nack_window_ms);
 
-  // Invoke audio encoders OnReceivedRtt().
-  audio_coding_->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
-    if (*encoder)
-      (*encoder)->OnReceivedRtt(rtt);
-  });
-
-  return 0;
+  OnReceivedRtt(rtt);
+  return true;
 }
 
 void ChannelSend::SetInputMute(bool enable) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   rtc::CritScope cs(&volume_settings_critsect_);
   input_mute_ = enable;
 }
@@ -783,24 +995,26 @@
   return input_mute_;
 }
 
-int ChannelSend::SendTelephoneEventOutband(int event, int duration_ms) {
+bool ChannelSend::SendTelephoneEventOutband(int event, int duration_ms) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   RTC_DCHECK_LE(0, event);
   RTC_DCHECK_GE(255, event);
   RTC_DCHECK_LE(0, duration_ms);
   RTC_DCHECK_GE(65535, duration_ms);
-  if (!Sending()) {
-    return -1;
+  if (!sending_) {
+    return false;
   }
   if (_rtpRtcpModule->SendTelephoneEventOutband(
           event, duration_ms, kTelephoneEventAttenuationdB) != 0) {
     RTC_DLOG(LS_ERROR) << "SendTelephoneEventOutband() failed to send event";
-    return -1;
+    return false;
   }
-  return 0;
+  return true;
 }
 
-int ChannelSend::SetSendTelephoneEventPayloadType(int payload_type,
-                                                  int payload_frequency) {
+bool ChannelSend::SetSendTelephoneEventPayloadType(int payload_type,
+                                                   int payload_frequency) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   RTC_DCHECK_LE(0, payload_type);
   RTC_DCHECK_GE(127, payload_type);
   CodecInst codec = {0};
@@ -813,42 +1027,44 @@
       RTC_DLOG(LS_ERROR)
           << "SetSendTelephoneEventPayloadType() failed to register "
              "send payload type";
-      return -1;
+      return false;
     }
   }
-  return 0;
+  return true;
 }
 
-int ChannelSend::SetLocalSSRC(unsigned int ssrc) {
-  if (channel_state_.Get().sending) {
-    RTC_DLOG(LS_ERROR) << "SetLocalSSRC() already sending";
-    return -1;
-  }
+void ChannelSend::SetLocalSSRC(uint32_t ssrc) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  RTC_DCHECK(!sending_);
+
   if (media_transport_) {
     rtc::CritScope cs(&media_transport_lock_);
     media_transport_channel_id_ = ssrc;
   }
   _rtpRtcpModule->SetSSRC(ssrc);
-  return 0;
 }
 
 void ChannelSend::SetMid(const std::string& mid, int extension_id) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   int ret = SetSendRtpHeaderExtension(true, kRtpExtensionMid, extension_id);
   RTC_DCHECK_EQ(0, ret);
   _rtpRtcpModule->SetMid(mid);
 }
 
 void ChannelSend::SetExtmapAllowMixed(bool extmap_allow_mixed) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   _rtpRtcpModule->SetExtmapAllowMixed(extmap_allow_mixed);
 }
 
-int ChannelSend::SetSendAudioLevelIndicationStatus(bool enable,
-                                                   unsigned char id) {
+void ChannelSend::SetSendAudioLevelIndicationStatus(bool enable, int id) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   _includeAudioLevelIndication = enable;
-  return SetSendRtpHeaderExtension(enable, kRtpExtensionAudioLevel, id);
+  int ret = SetSendRtpHeaderExtension(enable, kRtpExtensionAudioLevel, id);
+  RTC_DCHECK_EQ(0, ret);
 }
 
 void ChannelSend::EnableSendTransportSequenceNumber(int id) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   int ret =
       SetSendRtpHeaderExtension(true, kRtpExtensionTransportSequenceNumber, id);
   RTC_DCHECK_EQ(0, ret);
@@ -857,6 +1073,7 @@
 void ChannelSend::RegisterSenderCongestionControlObjects(
     RtpTransportControllerSendInterface* transport,
     RtcpBandwidthObserver* bandwidth_observer) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   RtpPacketSender* rtp_packet_sender = transport->packet_sender();
   TransportFeedbackObserver* transport_feedback_observer =
       transport->transport_feedback_observer();
@@ -878,6 +1095,7 @@
 }
 
 void ChannelSend::ResetSenderCongestionControlObjects() {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   RTC_DCHECK(packet_router_);
   _rtpRtcpModule->SetStorePacketsStatus(false, 600);
   rtcp_observer_->SetBandwidthObserver(nullptr);
@@ -888,35 +1106,25 @@
   rtp_packet_sender_proxy_->SetPacketSender(nullptr);
 }
 
-void ChannelSend::SetRTCPStatus(bool enable) {
-  _rtpRtcpModule->SetRTCPStatus(enable ? RtcpMode::kCompound : RtcpMode::kOff);
+void ChannelSend::SetRTCP_CNAME(absl::string_view c_name) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  // Note: SetCNAME() accepts a c string of length at most 255.
+  const std::string c_name_limited(c_name.substr(0, 255));
+  int ret = _rtpRtcpModule->SetCNAME(c_name_limited.c_str()) != 0;
+  RTC_DCHECK_EQ(0, ret) << "SetRTCP_CNAME() failed to set RTCP CNAME";
 }
 
-int ChannelSend::SetRTCP_CNAME(const char cName[256]) {
-  if (_rtpRtcpModule->SetCNAME(cName) != 0) {
-    RTC_DLOG(LS_ERROR) << "SetRTCP_CNAME() failed to set RTCP CNAME";
-    return -1;
-  }
-  return 0;
-}
-
-int ChannelSend::GetRemoteRTCPReportBlocks(
-    std::vector<ReportBlock>* report_blocks) {
-  if (report_blocks == NULL) {
-    RTC_DLOG(LS_ERROR) << "GetRemoteRTCPReportBlock()s invalid report_blocks.";
-    return -1;
-  }
-
+std::vector<ReportBlock> ChannelSend::GetRemoteRTCPReportBlocks() const {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   // Get the report blocks from the latest received RTCP Sender or Receiver
   // Report. Each element in the vector contains the sender's SSRC and a
   // report block according to RFC 3550.
   std::vector<RTCPReportBlock> rtcp_report_blocks;
-  if (_rtpRtcpModule->RemoteRTCPStat(&rtcp_report_blocks) != 0) {
-    return -1;
-  }
 
-  if (rtcp_report_blocks.empty())
-    return 0;
+  int ret = _rtpRtcpModule->RemoteRTCPStat(&rtcp_report_blocks);
+  RTC_DCHECK_EQ(0, ret);
+
+  std::vector<ReportBlock> report_blocks;
 
   std::vector<RTCPReportBlock>::const_iterator it = rtcp_report_blocks.begin();
   for (; it != rtcp_report_blocks.end(); ++it) {
@@ -930,19 +1138,16 @@
     report_block.interarrival_jitter = it->jitter;
     report_block.last_SR_timestamp = it->last_sender_report_timestamp;
     report_block.delay_since_last_SR = it->delay_since_last_sender_report;
-    report_blocks->push_back(report_block);
+    report_blocks.push_back(report_block);
   }
-  return 0;
+  return report_blocks;
 }
 
-int ChannelSend::GetRTPStatistics(CallSendStatistics& stats) {
-  // --- RtcpStatistics
-
-  // --- RTT
+CallSendStatistics ChannelSend::GetRTCPStatistics() const {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
+  CallSendStatistics stats = {0};
   stats.rttMs = GetRTT();
 
-  // --- Data counters
-
   size_t bytesSent(0);
   uint32_t packetsSent(0);
 
@@ -955,24 +1160,12 @@
   stats.bytesSent = bytesSent;
   stats.packetsSent = packetsSent;
 
-  return 0;
-}
-
-void ChannelSend::SetNACKStatus(bool enable, int maxNumberOfPackets) {
-  // None of these functions can fail.
-  if (enable)
-    audio_coding_->EnableNack(maxNumberOfPackets);
-  else
-    audio_coding_->DisableNack();
-}
-
-// Called when we are missing one or more packets.
-int ChannelSend::ResendPackets(const uint16_t* sequence_numbers, int length) {
-  return _rtpRtcpModule->SendNACK(sequence_numbers, length);
+  return stats;
 }
 
 void ChannelSend::ProcessAndEncodeAudio(
     std::unique_ptr<AudioFrame> audio_frame) {
+  RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
   // Avoid posting any new tasks if sending was already stopped in StopSend().
   rtc::CritScope cs(&encoder_queue_lock_);
   if (!encoder_queue_is_active_) {
@@ -1038,6 +1231,7 @@
 }
 
 void ChannelSend::SetTransportOverhead(size_t transport_overhead_per_packet) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   rtc::CritScope cs(&overhead_per_packet_lock_);
   transport_overhead_per_packet_ = transport_overhead_per_packet;
   UpdateOverheadForEncoder();
@@ -1051,36 +1245,42 @@
 }
 
 ANAStats ChannelSend::GetANAStatistics() const {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   return audio_coding_->GetANAStats();
 }
 
 RtpRtcp* ChannelSend::GetRtpRtcp() const {
+  RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
   return _rtpRtcpModule.get();
 }
 
 int ChannelSend::SetSendRtpHeaderExtension(bool enable,
                                            RTPExtensionType type,
-                                           unsigned char id) {
+                                           int id) {
   int error = 0;
   _rtpRtcpModule->DeregisterSendRtpHeaderExtension(type);
   if (enable) {
-    error = _rtpRtcpModule->RegisterSendRtpHeaderExtension(type, id);
+    // TODO(nisse): RtpRtcp::RegisterSendRtpHeaderExtension to take an int
+    // argument. Currently it wants an uint8_t.
+    error = _rtpRtcpModule->RegisterSendRtpHeaderExtension(
+        type, rtc::dchecked_cast<uint8_t>(id));
   }
   return error;
 }
 
-int ChannelSend::GetRtpTimestampRateHz() const {
-  const auto format = audio_coding_->ReceiveFormat();
-  // Default to the playout frequency if we've not gotten any packets yet.
-  // TODO(ossu): Zero clockrate can only happen if we've added an external
-  // decoder for a format we don't support internally. Remove once that way of
-  // adding decoders is gone!
-  return (format && format->clockrate_hz != 0)
-             ? format->clockrate_hz
-             : audio_coding_->PlayoutFrequency();
-}
-
 int64_t ChannelSend::GetRTT() const {
+  if (media_transport_) {
+    // GetRTT is generally used in the RTCP codepath, where media transport is
+    // not present and so it shouldn't be needed. But it's also invoked in
+    // 'GetStats' method, and for now returning media transport RTT here gives
+    // us "free" rtt stats for media transport.
+    auto target_rate = media_transport_->GetLatestTargetTransferRate();
+    if (target_rate.has_value()) {
+      return target_rate.value().network_estimate.round_trip_time.ms();
+    }
+
+    return 0;
+  }
   RtcpMode method = _rtpRtcpModule->RTCP();
   if (method == RtcpMode::kOff) {
     return 0;
@@ -1107,6 +1307,7 @@
 
 void ChannelSend::SetFrameEncryptor(
     rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor) {
+  RTC_DCHECK_RUN_ON(&worker_thread_checker_);
   rtc::CritScope cs(&encoder_queue_lock_);
   if (encoder_queue_is_active_) {
     encoder_queue_->PostTask([this, frame_encryptor]() {
@@ -1117,5 +1318,39 @@
   }
 }
 
+void ChannelSend::OnTargetTransferRate(TargetTransferRate rate) {
+  RTC_DCHECK(media_transport_);
+  OnReceivedRtt(rate.network_estimate.round_trip_time.ms());
+}
+
+void ChannelSend::OnReceivedRtt(int64_t rtt_ms) {
+  // Invoke audio encoders OnReceivedRtt().
+  audio_coding_->ModifyEncoder(
+      [rtt_ms](std::unique_ptr<AudioEncoder>* encoder) {
+        if (*encoder) {
+          (*encoder)->OnReceivedRtt(rtt_ms);
+        }
+      });
+}
+
+}  // namespace
+
+std::unique_ptr<ChannelSendInterface> CreateChannelSend(
+    rtc::TaskQueue* encoder_queue,
+    ProcessThread* module_process_thread,
+    MediaTransportInterface* media_transport,
+    Transport* rtp_transport,
+    RtcpRttStats* rtcp_rtt_stats,
+    RtcEventLog* rtc_event_log,
+    FrameEncryptorInterface* frame_encryptor,
+    const webrtc::CryptoOptions& crypto_options,
+    bool extmap_allow_mixed,
+    int rtcp_report_interval_ms) {
+  return absl::make_unique<ChannelSend>(
+      encoder_queue, module_process_thread, media_transport, rtp_transport,
+      rtcp_rtt_stats, rtc_event_log, frame_encryptor, crypto_options,
+      extmap_allow_mixed, rtcp_report_interval_ms);
+}
+
 }  // namespace voe
 }  // namespace webrtc
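
A minimal sketch of how a caller can drive the new OnBitrateAllocation() entry point that replaces SetBitRate(). Only the target_bitrate field and its bps() accessor are taken from the code above; the DataRate::bps() factory and a default-constructed BitrateAllocationUpdate are assumptions.

// Sketch only, not part of the change itself. Assumes BitrateAllocationUpdate
// is default-constructible and that DataRate::bps() exists.
void UpdateSendBitrate(webrtc::voe::ChannelSendInterface* channel,
                       int bitrate_bps) {
  webrtc::BitrateAllocationUpdate update;
  update.target_bitrate = webrtc::DataRate::bps(bitrate_bps);
  channel->OnBitrateAllocation(update);  // replaces the old SetBitRate() call
}
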
diff --git a/audio/channel_send.h b/audio/channel_send.h
index 407303f..083e9a6 100644
--- a/audio/channel_send.h
+++ b/audio/channel_send.h
@@ -11,44 +11,26 @@
 #ifndef AUDIO_CHANNEL_SEND_H_
 #define AUDIO_CHANNEL_SEND_H_
 
-#include <map>
 #include <memory>
 #include <string>
 #include <vector>
 
 #include "api/audio/audio_frame.h"
 #include "api/audio_codecs/audio_encoder.h"
-#include "api/call/transport.h"
 #include "api/crypto/cryptooptions.h"
 #include "api/media_transport_interface.h"
-#include "common_types.h"  // NOLINT(build/include)
-#include "modules/audio_coding/include/audio_coding_module.h"
-#include "modules/audio_processing/rms_level.h"
 #include "modules/rtp_rtcp/include/rtp_rtcp.h"
-#include "rtc_base/criticalsection.h"
+#include "rtc_base/function_view.h"
 #include "rtc_base/task_queue.h"
-#include "rtc_base/thread_checker.h"
-
-// TODO(solenberg, nisse): This file contains a few NOLINT marks, to silence
-// warnings about use of unsigned short, and non-const reference arguments.
-// These need cleanup, in a separate cl.
-
-namespace rtc {
-class TimestampWrapAroundHandler;
-}
 
 namespace webrtc {
 
 class FrameEncryptorInterface;
-class PacketRouter;
 class ProcessThread;
-class RateLimiter;
 class RtcEventLog;
 class RtpRtcp;
 class RtpTransportControllerSendInterface;
 
-struct SenderInfo;
-
 struct CallSendStatistics {
   int64_t rttMs;
   size_t bytesSent;
@@ -69,277 +51,77 @@
 
 namespace voe {
 
-class RtpPacketSenderProxy;
-class TransportFeedbackProxy;
-class TransportSequenceNumberProxy;
-class VoERtcpObserver;
-
-// Helper class to simplify locking scheme for members that are accessed from
-// multiple threads.
-// Example: a member can be set on thread T1 and read by an internal audio
-// thread T2. Accessing the member via this class ensures that we are
-// safe and also avoid TSan v2 warnings.
-class ChannelSendState {
+class ChannelSendInterface {
  public:
-  struct State {
-    bool sending = false;
-  };
+  virtual ~ChannelSendInterface() = default;
 
-  ChannelSendState() {}
-  virtual ~ChannelSendState() {}
+  virtual bool ReceivedRTCPPacket(const uint8_t* packet, size_t length) = 0;
 
-  void Reset() {
-    rtc::CritScope lock(&lock_);
-    state_ = State();
-  }
+  virtual CallSendStatistics GetRTCPStatistics() const = 0;
 
-  State Get() const {
-    rtc::CritScope lock(&lock_);
-    return state_;
-  }
+  virtual bool SetEncoder(int payload_type,
+                          std::unique_ptr<AudioEncoder> encoder) = 0;
+  virtual void ModifyEncoder(
+      rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) = 0;
 
-  void SetSending(bool enable) {
-    rtc::CritScope lock(&lock_);
-    state_.sending = enable;
-  }
-
- private:
-  rtc::CriticalSection lock_;
-  State state_;
-};
-
-class ChannelSend
-    : public Transport,
-      public AudioPacketizationCallback,  // receive encoded packets from the
-                                          // ACM
-      public OverheadObserver {
- public:
-  // TODO(nisse): Make OnUplinkPacketLossRate public, and delete friend
-  // declaration.
-  friend class VoERtcpObserver;
-
-  ChannelSend(rtc::TaskQueue* encoder_queue,
-              ProcessThread* module_process_thread,
-              MediaTransportInterface* media_transport,
-              RtcpRttStats* rtcp_rtt_stats,
-              RtcEventLog* rtc_event_log,
-              FrameEncryptorInterface* frame_encryptor,
-              const webrtc::CryptoOptions& crypto_options,
-              bool extmap_allow_mixed);
-
-  virtual ~ChannelSend();
-
-  // Send using this encoder, with this payload type.
-  bool SetEncoder(int payload_type, std::unique_ptr<AudioEncoder> encoder);
-  void ModifyEncoder(
-      rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier);
-
-  // API methods
-
-  // VoEBase
-  int32_t StartSend();
-  void StopSend();
-
-  // Codecs
-  void SetBitRate(int bitrate_bps, int64_t probing_interval_ms);
-  int GetBitRate() const;
-  bool EnableAudioNetworkAdaptor(const std::string& config_string);
-  void DisableAudioNetworkAdaptor();
-
-  // TODO(nisse): Modifies decoder, but not used?
-  void SetReceiverFrameLengthRange(int min_frame_length_ms,
-                                   int max_frame_length_ms);
-
-  // Network
-  void RegisterTransport(Transport* transport);
-  // TODO(nisse, solenberg): Delete when VoENetwork is deleted.
-  int32_t ReceivedRTCPPacket(const uint8_t* data, size_t length);
-
-  // Muting, Volume and Level.
-  void SetInputMute(bool enable);
-
-  // Stats.
-  ANAStats GetANAStatistics() const;
-
-  // Used by AudioSendStream.
-  RtpRtcp* GetRtpRtcp() const;
-
-  // DTMF.
-  int SendTelephoneEventOutband(int event, int duration_ms);
-  int SetSendTelephoneEventPayloadType(int payload_type, int payload_frequency);
-
-  // RTP+RTCP
-  int SetLocalSSRC(unsigned int ssrc);
-
-  void SetMid(const std::string& mid, int extension_id);
-  void SetExtmapAllowMixed(bool extmap_allow_mixed);
-  int SetSendAudioLevelIndicationStatus(bool enable, unsigned char id);
-  void EnableSendTransportSequenceNumber(int id);
-
-  void RegisterSenderCongestionControlObjects(
+  virtual void SetLocalSSRC(uint32_t ssrc) = 0;
+  virtual void SetMid(const std::string& mid, int extension_id) = 0;
+  virtual void SetRTCP_CNAME(absl::string_view c_name) = 0;
+  virtual void SetExtmapAllowMixed(bool extmap_allow_mixed) = 0;
+  virtual void SetSendAudioLevelIndicationStatus(bool enable, int id) = 0;
+  virtual void EnableSendTransportSequenceNumber(int id) = 0;
+  virtual void RegisterSenderCongestionControlObjects(
       RtpTransportControllerSendInterface* transport,
-      RtcpBandwidthObserver* bandwidth_observer);
-  void ResetSenderCongestionControlObjects();
-  void SetRTCPStatus(bool enable);
-  int SetRTCP_CNAME(const char cName[256]);
-  int GetRemoteRTCPReportBlocks(std::vector<ReportBlock>* report_blocks);
-  int GetRTPStatistics(CallSendStatistics& stats);  // NOLINT
-  void SetNACKStatus(bool enable, int maxNumberOfPackets);
+      RtcpBandwidthObserver* bandwidth_observer) = 0;
+  virtual void ResetSenderCongestionControlObjects() = 0;
+  virtual std::vector<ReportBlock> GetRemoteRTCPReportBlocks() const = 0;
+  virtual ANAStats GetANAStatistics() const = 0;
+  virtual bool SetSendTelephoneEventPayloadType(int payload_type,
+                                                int payload_frequency) = 0;
+  virtual bool SendTelephoneEventOutband(int event, int duration_ms) = 0;
+  virtual void OnBitrateAllocation(BitrateAllocationUpdate update) = 0;
+  virtual int GetBitrate() const = 0;
+  virtual void SetInputMute(bool muted) = 0;
 
-  // From AudioPacketizationCallback in the ACM
-  int32_t SendData(FrameType frameType,
-                   uint8_t payloadType,
-                   uint32_t timeStamp,
-                   const uint8_t* payloadData,
-                   size_t payloadSize,
-                   const RTPFragmentationHeader* fragmentation) override;
+  virtual void ProcessAndEncodeAudio(
+      std::unique_ptr<AudioFrame> audio_frame) = 0;
+  virtual void SetTransportOverhead(size_t transport_overhead_per_packet) = 0;
+  virtual RtpRtcp* GetRtpRtcp() const = 0;
 
-  // From Transport (called by the RTP/RTCP module)
-  bool SendRtp(const uint8_t* data,
-               size_t len,
-               const PacketOptions& packet_options) override;
-  bool SendRtcp(const uint8_t* data, size_t len) override;
+  virtual void OnTwccBasedUplinkPacketLossRate(float packet_loss_rate) = 0;
+  virtual void OnRecoverableUplinkPacketLossRate(
+      float recoverable_packet_loss_rate) = 0;
+  // In RTP we currently rely on RTCP packets (|ReceivedRTCPPacket|) to inform
+  // about RTT.
+  // In media transport we rely on the TargetTransferRateObserver instead.
+  // In other words, if you are using RTP, you should expect
+  // |ReceivedRTCPPacket| to be called, if you are using media transport,
+  // |OnTargetTransferRate| will be called.
+  //
+  // In future, RTP media will move to the media transport implementation and
+  // these conditions will be removed.
+  // Returns the RTT in milliseconds.
+  virtual int64_t GetRTT() const = 0;
+  virtual void StartSend() = 0;
+  virtual void StopSend() = 0;
 
-  int PreferredSampleRate() const;
-
-  bool Sending() const { return channel_state_.Get().sending; }
-  RtpRtcp* RtpRtcpModulePtr() const { return _rtpRtcpModule.get(); }
-
-  // ProcessAndEncodeAudio() posts a task on the shared encoder task queue,
-  // which in turn calls (on the queue) ProcessAndEncodeAudioOnTaskQueue() where
-  // the actual processing of the audio takes place. The processing mainly
-  // consists of encoding and preparing the result for sending by adding it to a
-  // send queue.
-  // The main reason for using a task queue here is to release the native,
-  // OS-specific, audio capture thread as soon as possible to ensure that it
-  // can go back to sleep and be prepared to deliver a new captured audio
-  // packet.
-  void ProcessAndEncodeAudio(std::unique_ptr<AudioFrame> audio_frame);
-
-  void SetTransportOverhead(size_t transport_overhead_per_packet);
-
-  // From OverheadObserver in the RTP/RTCP module
-  void OnOverheadChanged(size_t overhead_bytes_per_packet) override;
-
-  // The existence of this function alongside OnUplinkPacketLossRate is
-  // a compromise. We want the encoder to be agnostic of the PLR source, but
-  // we also don't want it to receive conflicting information from TWCC and
-  // from RTCP-XR.
-  void OnTwccBasedUplinkPacketLossRate(float packet_loss_rate);
-
-  void OnRecoverableUplinkPacketLossRate(float recoverable_packet_loss_rate);
-
-  int64_t GetRTT() const;
-
-  // E2EE Custom Audio Frame Encryption
-  void SetFrameEncryptor(
-      rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor);
-
- private:
-  class ProcessAndEncodeAudioTask;
-
-  void Init();
-  void Terminate();
-
-  void OnUplinkPacketLossRate(float packet_loss_rate);
-  bool InputMute() const;
-
-  int ResendPackets(const uint16_t* sequence_numbers, int length);
-
-  int SetSendRtpHeaderExtension(bool enable,
-                                RTPExtensionType type,
-                                unsigned char id);
-
-  void UpdateOverheadForEncoder()
-      RTC_EXCLUSIVE_LOCKS_REQUIRED(overhead_per_packet_lock_);
-
-  int GetRtpTimestampRateHz() const;
-
-  int32_t SendRtpAudio(FrameType frameType,
-                       uint8_t payloadType,
-                       uint32_t timeStamp,
-                       rtc::ArrayView<const uint8_t> payload,
-                       const RTPFragmentationHeader* fragmentation);
-
-  int32_t SendMediaTransportAudio(FrameType frameType,
-                                  uint8_t payloadType,
-                                  uint32_t timeStamp,
-                                  rtc::ArrayView<const uint8_t> payload,
-                                  const RTPFragmentationHeader* fragmentation);
-
-  // Return media transport or nullptr if using RTP.
-  MediaTransportInterface* media_transport() { return media_transport_; }
-
-  // Called on the encoder task queue when a new input audio frame is ready
-  // for encoding.
-  void ProcessAndEncodeAudioOnTaskQueue(AudioFrame* audio_input);
-
-  rtc::CriticalSection _callbackCritSect;
-  rtc::CriticalSection volume_settings_critsect_;
-
-  ChannelSendState channel_state_;
-
-  RtcEventLog* const event_log_;
-
-  std::unique_ptr<RtpRtcp> _rtpRtcpModule;
-
-  std::unique_ptr<AudioCodingModule> audio_coding_;
-  uint32_t _timeStamp RTC_GUARDED_BY(encoder_queue_);
-
-  uint16_t send_sequence_number_;
-
-  // uses
-  ProcessThread* _moduleProcessThreadPtr;
-  Transport* _transportPtr;  // WebRtc socket or external transport
-  RmsLevel rms_level_ RTC_GUARDED_BY(encoder_queue_);
-  bool input_mute_ RTC_GUARDED_BY(volume_settings_critsect_);
-  bool previous_frame_muted_ RTC_GUARDED_BY(encoder_queue_);
-  // VoeRTP_RTCP
-  // TODO(henrika): can today be accessed on the main thread and on the
-  // task queue; hence potential race.
-  bool _includeAudioLevelIndication;
-  size_t transport_overhead_per_packet_
-      RTC_GUARDED_BY(overhead_per_packet_lock_);
-  size_t rtp_overhead_per_packet_ RTC_GUARDED_BY(overhead_per_packet_lock_);
-  rtc::CriticalSection overhead_per_packet_lock_;
-  // RtcpBandwidthObserver
-  std::unique_ptr<VoERtcpObserver> rtcp_observer_;
-
-  PacketRouter* packet_router_ = nullptr;
-  std::unique_ptr<TransportFeedbackProxy> feedback_observer_proxy_;
-  std::unique_ptr<TransportSequenceNumberProxy> seq_num_allocator_proxy_;
-  std::unique_ptr<RtpPacketSenderProxy> rtp_packet_sender_proxy_;
-  std::unique_ptr<RateLimiter> retransmission_rate_limiter_;
-
-  rtc::ThreadChecker construction_thread_;
-
-  const bool use_twcc_plr_for_ana_;
-
-  rtc::CriticalSection encoder_queue_lock_;
-  bool encoder_queue_is_active_ RTC_GUARDED_BY(encoder_queue_lock_) = false;
-  rtc::TaskQueue* encoder_queue_ = nullptr;
-
-  MediaTransportInterface* const media_transport_;
-  int media_transport_sequence_number_ RTC_GUARDED_BY(encoder_queue_) = 0;
-
-  rtc::CriticalSection media_transport_lock_;
-  // Currently set by SetLocalSSRC.
-  uint64_t media_transport_channel_id_ RTC_GUARDED_BY(&media_transport_lock_) =
-      0;
-  // Cache payload type and sampling frequency from most recent call to
-  // SetEncoder. Needed to set MediaTransportEncodedAudioFrame metadata, and
-  // invalidate on encoder change.
-  int media_transport_payload_type_ RTC_GUARDED_BY(&media_transport_lock_);
-  int media_transport_sampling_frequency_
-      RTC_GUARDED_BY(&media_transport_lock_);
-
-  // E2EE Audio Frame Encryption
-  rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor_;
-  // E2EE Frame Encryption Options
-  webrtc::CryptoOptions crypto_options_;
-  int configured_bitrate_bps_ = 0;
+  // E2EE Custom Audio Frame Encryption (Optional)
+  virtual void SetFrameEncryptor(
+      rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor) = 0;
 };
 
+std::unique_ptr<ChannelSendInterface> CreateChannelSend(
+    rtc::TaskQueue* encoder_queue,
+    ProcessThread* module_process_thread,
+    MediaTransportInterface* media_transport,
+    Transport* rtp_transport,
+    RtcpRttStats* rtcp_rtt_stats,
+    RtcEventLog* rtc_event_log,
+    FrameEncryptorInterface* frame_encryptor,
+    const webrtc::CryptoOptions& crypto_options,
+    bool extmap_allow_mixed,
+    int rtcp_report_interval_ms);
+
 }  // namespace voe
 }  // namespace webrtc
 
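A usage sketch of the factory declared above, assuming the caller already owns the listed dependencies; every object passed in below is a placeholder.

// Sketch only: constructing a send channel through ChannelSendInterface
// instead of instantiating ChannelSend directly. All arguments are
// placeholders owned by the caller.
std::unique_ptr<webrtc::voe::ChannelSendInterface> channel =
    webrtc::voe::CreateChannelSend(
        &encoder_queue, &module_process_thread,
        /*media_transport=*/nullptr, &rtp_transport, &rtcp_rtt_stats,
        &event_log, /*frame_encryptor=*/nullptr, crypto_options,
        /*extmap_allow_mixed=*/false, /*rtcp_report_interval_ms=*/5000);
channel->StartSend();
// ... feed audio via ProcessAndEncodeAudio(), then:
channel->StopSend();
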
diff --git a/audio/channel_send_proxy.cc b/audio/channel_send_proxy.cc
deleted file mode 100644
index 2d0bdd3..0000000
--- a/audio/channel_send_proxy.cc
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "audio/channel_send_proxy.h"
-
-#include <utility>
-
-#include "api/crypto/frameencryptorinterface.h"
-#include "call/rtp_transport_controller_send_interface.h"
-#include "rtc_base/checks.h"
-
-namespace webrtc {
-namespace voe {
-ChannelSendProxy::ChannelSendProxy() {}
-
-ChannelSendProxy::ChannelSendProxy(std::unique_ptr<ChannelSend> channel)
-    : channel_(std::move(channel)) {
-  RTC_DCHECK(channel_);
-  module_process_thread_checker_.DetachFromThread();
-}
-
-ChannelSendProxy::~ChannelSendProxy() {}
-
-void ChannelSendProxy::SetLocalSSRC(uint32_t ssrc) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  int error = channel_->SetLocalSSRC(ssrc);
-  RTC_DCHECK_EQ(0, error);
-}
-
-void ChannelSendProxy::SetNACKStatus(bool enable, int max_packets) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetNACKStatus(enable, max_packets);
-}
-
-CallSendStatistics ChannelSendProxy::GetRTCPStatistics() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  CallSendStatistics stats = {0};
-  int error = channel_->GetRTPStatistics(stats);
-  RTC_DCHECK_EQ(0, error);
-  return stats;
-}
-
-void ChannelSendProxy::RegisterTransport(Transport* transport) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->RegisterTransport(transport);
-}
-
-bool ChannelSendProxy::ReceivedRTCPPacket(const uint8_t* packet,
-                                          size_t length) {
-  // May be called on either worker thread or network thread.
-  return channel_->ReceivedRTCPPacket(packet, length) == 0;
-}
-
-bool ChannelSendProxy::SetEncoder(int payload_type,
-                                  std::unique_ptr<AudioEncoder> encoder) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->SetEncoder(payload_type, std::move(encoder));
-}
-
-void ChannelSendProxy::ModifyEncoder(
-    rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->ModifyEncoder(modifier);
-}
-
-void ChannelSendProxy::SetRTCPStatus(bool enable) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetRTCPStatus(enable);
-}
-
-void ChannelSendProxy::SetMid(const std::string& mid, int extension_id) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetMid(mid, extension_id);
-}
-
-void ChannelSendProxy::SetRTCP_CNAME(const std::string& c_name) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  // Note: VoERTP_RTCP::SetRTCP_CNAME() accepts a char[256] array.
-  std::string c_name_limited = c_name.substr(0, 255);
-  int error = channel_->SetRTCP_CNAME(c_name_limited.c_str());
-  RTC_DCHECK_EQ(0, error);
-}
-
-void ChannelSendProxy::SetExtmapAllowMixed(bool extmap_allow_mixed) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetExtmapAllowMixed(extmap_allow_mixed);
-}
-
-void ChannelSendProxy::SetSendAudioLevelIndicationStatus(bool enable, int id) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  int error = channel_->SetSendAudioLevelIndicationStatus(enable, id);
-  RTC_DCHECK_EQ(0, error);
-}
-
-void ChannelSendProxy::EnableSendTransportSequenceNumber(int id) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->EnableSendTransportSequenceNumber(id);
-}
-
-void ChannelSendProxy::RegisterSenderCongestionControlObjects(
-    RtpTransportControllerSendInterface* transport,
-    RtcpBandwidthObserver* bandwidth_observer) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->RegisterSenderCongestionControlObjects(transport,
-                                                   bandwidth_observer);
-}
-
-void ChannelSendProxy::ResetSenderCongestionControlObjects() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->ResetSenderCongestionControlObjects();
-}
-
-std::vector<ReportBlock> ChannelSendProxy::GetRemoteRTCPReportBlocks() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  std::vector<webrtc::ReportBlock> blocks;
-  int error = channel_->GetRemoteRTCPReportBlocks(&blocks);
-  RTC_DCHECK_EQ(0, error);
-  return blocks;
-}
-
-ANAStats ChannelSendProxy::GetANAStatistics() const {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->GetANAStatistics();
-}
-
-bool ChannelSendProxy::SetSendTelephoneEventPayloadType(int payload_type,
-                                                        int payload_frequency) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->SetSendTelephoneEventPayloadType(payload_type,
-                                                    payload_frequency) == 0;
-}
-
-bool ChannelSendProxy::SendTelephoneEventOutband(int event, int duration_ms) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  return channel_->SendTelephoneEventOutband(event, duration_ms) == 0;
-}
-
-void ChannelSendProxy::SetBitrate(int bitrate_bps,
-                                  int64_t probing_interval_ms) {
-  // This method can be called on the worker thread, module process thread
-  // or on a TaskQueue via VideoSendStreamImpl::OnEncoderConfigurationChanged.
-  // TODO(solenberg): Figure out a good way to check this or enforce calling
-  // rules.
-  // RTC_DCHECK(worker_thread_checker_.CalledOnValidThread() ||
-  //            module_process_thread_checker_.CalledOnValidThread());
-  channel_->SetBitRate(bitrate_bps, probing_interval_ms);
-}
-
-int ChannelSendProxy::GetBitrate() const {
-  return channel_->GetBitRate();
-}
-
-void ChannelSendProxy::SetInputMute(bool muted) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetInputMute(muted);
-}
-
-void ChannelSendProxy::ProcessAndEncodeAudio(
-    std::unique_ptr<AudioFrame> audio_frame) {
-  RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
-  return channel_->ProcessAndEncodeAudio(std::move(audio_frame));
-}
-
-void ChannelSendProxy::SetTransportOverhead(int transport_overhead_per_packet) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetTransportOverhead(transport_overhead_per_packet);
-}
-
-RtpRtcp* ChannelSendProxy::GetRtpRtcp() const {
-  RTC_DCHECK(module_process_thread_checker_.CalledOnValidThread());
-  return channel_->GetRtpRtcp();
-}
-
-void ChannelSendProxy::OnTwccBasedUplinkPacketLossRate(float packet_loss_rate) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->OnTwccBasedUplinkPacketLossRate(packet_loss_rate);
-}
-
-void ChannelSendProxy::OnRecoverableUplinkPacketLossRate(
-    float recoverable_packet_loss_rate) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->OnRecoverableUplinkPacketLossRate(recoverable_packet_loss_rate);
-}
-
-void ChannelSendProxy::StartSend() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  int error = channel_->StartSend();
-  RTC_DCHECK_EQ(0, error);
-}
-
-void ChannelSendProxy::StopSend() {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->StopSend();
-}
-
-ChannelSend* ChannelSendProxy::GetChannel() const {
-  return channel_.get();
-}
-
-void ChannelSendProxy::SetFrameEncryptor(
-    rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor) {
-  RTC_DCHECK(worker_thread_checker_.CalledOnValidThread());
-  channel_->SetFrameEncryptor(frame_encryptor);
-}
-
-}  // namespace voe
-}  // namespace webrtc
diff --git a/audio/channel_send_proxy.h b/audio/channel_send_proxy.h
deleted file mode 100644
index 3146830..0000000
--- a/audio/channel_send_proxy.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef AUDIO_CHANNEL_SEND_PROXY_H_
-#define AUDIO_CHANNEL_SEND_PROXY_H_
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "api/audio_codecs/audio_encoder.h"
-#include "audio/channel_send.h"
-#include "rtc_base/constructormagic.h"
-#include "rtc_base/race_checker.h"
-#include "rtc_base/thread_checker.h"
-
-namespace webrtc {
-
-class FrameEncryptorInterface;
-class RtcpBandwidthObserver;
-class RtpRtcp;
-class RtpTransportControllerSendInterface;
-class Transport;
-
-namespace voe {
-
-// This class provides the "view" of a voe::Channel that we need to implement
-// webrtc::AudioSendStream. It serves two purposes:
-//  1. Allow mocking just the interfaces used, instead of the entire
-//     voe::Channel class.
-//  2. Provide a refined interface for the stream classes, including assumptions
-//     on return values and input adaptation.
-class ChannelSendProxy {
- public:
-  ChannelSendProxy();
-  explicit ChannelSendProxy(std::unique_ptr<ChannelSend> channel);
-  virtual ~ChannelSendProxy();
-
-  // Shared with ChannelReceiveProxy
-  virtual void SetLocalSSRC(uint32_t ssrc);
-  virtual void SetNACKStatus(bool enable, int max_packets);
-  virtual CallSendStatistics GetRTCPStatistics() const;
-  virtual void RegisterTransport(Transport* transport);
-  virtual bool ReceivedRTCPPacket(const uint8_t* packet, size_t length);
-
-  virtual bool SetEncoder(int payload_type,
-                          std::unique_ptr<AudioEncoder> encoder);
-  virtual void ModifyEncoder(
-      rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier);
-
-  virtual void SetRTCPStatus(bool enable);
-  virtual void SetMid(const std::string& mid, int extension_id);
-  virtual void SetRTCP_CNAME(const std::string& c_name);
-  virtual void SetExtmapAllowMixed(bool extmap_allow_mixed);
-  virtual void SetSendAudioLevelIndicationStatus(bool enable, int id);
-  virtual void EnableSendTransportSequenceNumber(int id);
-  virtual void RegisterSenderCongestionControlObjects(
-      RtpTransportControllerSendInterface* transport,
-      RtcpBandwidthObserver* bandwidth_observer);
-  virtual void ResetSenderCongestionControlObjects();
-  virtual std::vector<ReportBlock> GetRemoteRTCPReportBlocks() const;
-  virtual ANAStats GetANAStatistics() const;
-  virtual bool SetSendTelephoneEventPayloadType(int payload_type,
-                                                int payload_frequency);
-  virtual bool SendTelephoneEventOutband(int event, int duration_ms);
-  virtual void SetBitrate(int bitrate_bps, int64_t probing_interval_ms);
-  virtual int GetBitrate() const;
-  virtual void SetInputMute(bool muted);
-
-  virtual void ProcessAndEncodeAudio(std::unique_ptr<AudioFrame> audio_frame);
-  virtual void SetTransportOverhead(int transport_overhead_per_packet);
-  virtual RtpRtcp* GetRtpRtcp() const;
-
-  virtual void OnTwccBasedUplinkPacketLossRate(float packet_loss_rate);
-  virtual void OnRecoverableUplinkPacketLossRate(
-      float recoverable_packet_loss_rate);
-  virtual void StartSend();
-  virtual void StopSend();
-
-  // Needed by ChannelReceiveProxy::AssociateSendChannel.
-  virtual ChannelSend* GetChannel() const;
-
-  // E2EE Custom Audio Frame Encryption (Optional)
-  virtual void SetFrameEncryptor(
-      rtc::scoped_refptr<FrameEncryptorInterface> frame_encryptor);
-
- private:
-  // Thread checkers document and lock usage of some methods on voe::Channel to
-  // specific threads we know about. The goal is to eventually split up
-  // voe::Channel into parts with single-threaded semantics, and thereby reduce
-  // the need for locks.
-  rtc::ThreadChecker worker_thread_checker_;
-  rtc::ThreadChecker module_process_thread_checker_;
-  // Methods accessed from audio and video threads are checked for sequential-
-  // only access. We don't necessarily own and control these threads, so thread
-  // checkers cannot be used. E.g. Chromium may transfer "ownership" from one
-  // audio thread to another, but access is still sequential.
-  rtc::RaceChecker audio_thread_race_checker_;
-  rtc::RaceChecker video_capture_thread_race_checker_;
-  std::unique_ptr<ChannelSend> channel_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(ChannelSendProxy);
-};
-}  // namespace voe
-}  // namespace webrtc
-
-#endif  // AUDIO_CHANNEL_SEND_PROXY_H_
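
The deleted proxy documented its thread checkers above; the merged ChannelSend applies the same pattern directly, as the RTC_DCHECK_RUN_ON and RTC_DCHECK_RUNS_SERIALIZED annotations in channel_send.cc show. A minimal sketch of that pattern, with hypothetical class and method names:

// Sketch only: thread-affinity checking without a proxy layer.
class ExampleSender {
 public:
  void SetMuted(bool muted) {
    RTC_DCHECK_RUN_ON(&worker_thread_checker_);  // worker-thread-only API
    muted_ = muted;
  }
  void DeliverAudio() {
    // Audio callbacks are sequential but may migrate between threads, so a
    // race checker is used instead of a thread checker.
    RTC_DCHECK_RUNS_SERIALIZED(&audio_thread_race_checker_);
  }

 private:
  rtc::ThreadChecker worker_thread_checker_;
  rtc::RaceChecker audio_thread_race_checker_;
  bool muted_ = false;
};
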
diff --git a/audio/mock_voe_channel_proxy.h b/audio/mock_voe_channel_proxy.h
index 962152f..eee25c5 100644
--- a/audio/mock_voe_channel_proxy.h
+++ b/audio/mock_voe_channel_proxy.h
@@ -17,15 +17,15 @@
 #include <vector>
 
 #include "api/test/mock_frame_encryptor.h"
-#include "audio/channel_receive_proxy.h"
-#include "audio/channel_send_proxy.h"
+#include "audio/channel_receive.h"
+#include "audio/channel_send.h"
 #include "modules/rtp_rtcp/source/rtp_packet_received.h"
 #include "test/gmock.h"
 
 namespace webrtc {
 namespace test {
 
-class MockChannelReceiveProxy : public voe::ChannelReceiveProxy {
+class MockChannelReceive : public voe::ChannelReceiveInterface {
  public:
   MOCK_METHOD1(SetLocalSSRC, void(uint32_t ssrc));
   MOCK_METHOD2(SetNACKStatus, void(bool enable, int max_packets));
@@ -47,10 +47,10 @@
                AudioMixer::Source::AudioFrameInfo(int sample_rate_hz,
                                                   AudioFrame* audio_frame));
   MOCK_CONST_METHOD0(PreferredSampleRate, int());
-  MOCK_METHOD1(AssociateSendChannel,
-               void(const voe::ChannelSendProxy& send_channel_proxy));
-  MOCK_METHOD0(DisassociateSendChannel, void());
+  MOCK_METHOD1(SetAssociatedSendChannel,
+               void(const voe::ChannelSendInterface* send_channel));
   MOCK_CONST_METHOD0(GetPlayoutTimestamp, uint32_t());
+  MOCK_CONST_METHOD0(GetSyncInfo, absl::optional<Syncable::Info>());
   MOCK_METHOD1(SetMinimumPlayoutDelay, void(int delay_ms));
   MOCK_CONST_METHOD1(GetRecCodec, bool(CodecInst* codec_inst));
   MOCK_METHOD1(SetReceiveCodecs,
@@ -60,7 +60,7 @@
   MOCK_METHOD0(StopPlayout, void());
 };
 
-class MockChannelSendProxy : public voe::ChannelSendProxy {
+class MockChannelSend : public voe::ChannelSendInterface {
  public:
   // GMock doesn't like move-only types, like std::unique_ptr.
   virtual bool SetEncoder(int payload_type,
@@ -72,10 +72,9 @@
   MOCK_METHOD1(
       ModifyEncoder,
       void(rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier));
-  MOCK_METHOD1(SetRTCPStatus, void(bool enable));
+  MOCK_METHOD2(SetMid, void(const std::string& mid, int extension_id));
   MOCK_METHOD1(SetLocalSSRC, void(uint32_t ssrc));
-  MOCK_METHOD1(SetRTCP_CNAME, void(const std::string& c_name));
-  MOCK_METHOD2(SetNACKStatus, void(bool enable, int max_packets));
+  MOCK_METHOD1(SetRTCP_CNAME, void(absl::string_view c_name));
   MOCK_METHOD1(SetExtmapAllowMixed, void(bool extmap_allow_mixed));
   MOCK_METHOD2(SetSendAudioLevelIndicationStatus, void(bool enable, int id));
   MOCK_METHOD1(EnableSendTransportSequenceNumber, void(int id));
@@ -89,9 +88,8 @@
   MOCK_METHOD2(SetSendTelephoneEventPayloadType,
                bool(int payload_type, int payload_frequency));
   MOCK_METHOD2(SendTelephoneEventOutband, bool(int event, int duration_ms));
-  MOCK_METHOD2(SetBitrate, void(int bitrate_bps, int64_t probing_interval_ms));
+  MOCK_METHOD1(OnBitrateAllocation, void(BitrateAllocationUpdate update));
   MOCK_METHOD1(SetInputMute, void(bool muted));
-  MOCK_METHOD1(RegisterTransport, void(Transport* transport));
   MOCK_METHOD2(ReceivedRTCPPacket, bool(const uint8_t* packet, size_t length));
   // GMock doesn't like move-only types, like std::unique_ptr.
   virtual void ProcessAndEncodeAudio(std::unique_ptr<AudioFrame> audio_frame) {
@@ -99,12 +97,14 @@
   }
   MOCK_METHOD1(ProcessAndEncodeAudioForMock,
                void(std::unique_ptr<AudioFrame>* audio_frame));
-  MOCK_METHOD1(SetTransportOverhead, void(int transport_overhead_per_packet));
+  MOCK_METHOD1(SetTransportOverhead,
+               void(size_t transport_overhead_per_packet));
   MOCK_CONST_METHOD0(GetRtpRtcp, RtpRtcp*());
   MOCK_CONST_METHOD0(GetBitrate, int());
   MOCK_METHOD1(OnTwccBasedUplinkPacketLossRate, void(float packet_loss_rate));
   MOCK_METHOD1(OnRecoverableUplinkPacketLossRate,
                void(float recoverable_packet_loss_rate));
+  MOCK_CONST_METHOD0(GetRTT, int64_t());
   MOCK_METHOD0(StartSend, void());
   MOCK_METHOD0(StopSend, void());
   MOCK_METHOD1(
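
A short sketch of how a test might consume the renamed mock; the expectations below are hypothetical and only exercise methods mocked above.

// Sketch only: a gMock-based test body using MockChannelSend.
webrtc::test::MockChannelSend channel;
EXPECT_CALL(channel, StartSend());
EXPECT_CALL(channel, GetBitrate()).WillOnce(::testing::Return(32000));
channel.StartSend();
EXPECT_EQ(32000, channel.GetBitrate());
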
diff --git a/audio/time_interval.cc b/audio/time_interval.cc
deleted file mode 100644
index cc10340..0000000
--- a/audio/time_interval.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "audio/time_interval.h"
-#include "rtc_base/checks.h"
-#include "rtc_base/timeutils.h"
-
-namespace webrtc {
-
-TimeInterval::TimeInterval() = default;
-TimeInterval::~TimeInterval() = default;
-
-void TimeInterval::Extend() {
-  Extend(rtc::TimeMillis());
-}
-
-void TimeInterval::Extend(int64_t time) {
-  if (!interval_) {
-    interval_.emplace(time, time);
-  } else {
-    if (time < interval_->first) {
-      interval_->first = time;
-    }
-    if (time > interval_->last) {
-      interval_->last = time;
-    }
-  }
-}
-
-void TimeInterval::Extend(const TimeInterval& other_interval) {
-  if (!other_interval.Empty()) {
-    Extend(other_interval.interval_->first);
-    Extend(other_interval.interval_->last);
-  }
-}
-
-bool TimeInterval::Empty() const {
-  return !interval_;
-}
-
-int64_t TimeInterval::Length() const {
-  RTC_DCHECK(interval_);
-  return interval_->last - interval_->first;
-}
-
-TimeInterval::Interval::Interval(int64_t first, int64_t last)
-    : first(first), last(last) {}
-
-}  // namespace webrtc
diff --git a/audio/time_interval.h b/audio/time_interval.h
deleted file mode 100644
index 79fe29d..0000000
--- a/audio/time_interval.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- *  Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef AUDIO_TIME_INTERVAL_H_
-#define AUDIO_TIME_INTERVAL_H_
-
-#include <stdint.h>
-
-#include "absl/types/optional.h"
-
-namespace webrtc {
-
-// This class logs the first and last time its Extend() function is called.
-//
-// This class is not thread-safe; Extend() calls should only be made by a
-// single thread at a time, such as within a lock or destructor.
-//
-// Example usage:
-//   // let x < y < z < u < v
-//   rtc::TimeInterval interval;
-//   ...
-//   interval.Extend(); // at time x
-//   ...
-//   interval.Extend(); // at time y
-//   ...
-//   interval.Extend(); // at time u
-//   ...
-//   interval.Extend(z); // at time v
-//   ...
-//   if (!interval.Empty()) {
-//     int64_t active_time = interval.Length(); // returns (u - x)
-//   }
-class TimeInterval {
- public:
-  TimeInterval();
-  ~TimeInterval();
-  // Extend the interval with the current time.
-  void Extend();
-  // Extend the interval with a given time.
-  void Extend(int64_t time);
-  // Take the convex hull with another interval.
-  void Extend(const TimeInterval& other_interval);
-  // True iff Extend has never been called.
-  bool Empty() const;
-  // Returns the time between the first and the last tick, in milliseconds.
-  int64_t Length() const;
-
- private:
-  struct Interval {
-    Interval(int64_t first, int64_t last);
-
-    int64_t first, last;
-  };
-  absl::optional<Interval> interval_;
-};
-
-}  // namespace webrtc
-
-#endif  // AUDIO_TIME_INTERVAL_H_
diff --git a/audio/time_interval_unittest.cc b/audio/time_interval_unittest.cc
deleted file mode 100644
index deff6e3..0000000
--- a/audio/time_interval_unittest.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "audio/time_interval.h"
-#include "api/units/time_delta.h"
-#include "rtc_base/fakeclock.h"
-#include "test/gtest.h"
-
-namespace webrtc {
-
-TEST(TimeIntervalTest, TimeInMs) {
-  rtc::ScopedFakeClock fake_clock;
-  TimeInterval interval;
-  interval.Extend();
-  fake_clock.AdvanceTime(TimeDelta::ms(100));
-  interval.Extend();
-  EXPECT_EQ(interval.Length(), 100);
-}
-
-TEST(TimeIntervalTest, Empty) {
-  TimeInterval interval;
-  EXPECT_TRUE(interval.Empty());
-  interval.Extend();
-  EXPECT_FALSE(interval.Empty());
-  interval.Extend(200);
-  EXPECT_FALSE(interval.Empty());
-}
-
-TEST(TimeIntervalTest, MonotoneIncreasing) {
-  const size_t point_count = 7;
-  const int64_t interval_points[] = {3, 2, 5, 0, 4, 1, 6};
-  const int64_t interval_differences[] = {0, 1, 3, 5, 5, 5, 6};
-  TimeInterval interval;
-  EXPECT_TRUE(interval.Empty());
-  for (size_t i = 0; i < point_count; ++i) {
-    interval.Extend(interval_points[i]);
-    EXPECT_EQ(interval_differences[i], interval.Length());
-  }
-}
-
-}  // namespace webrtc
diff --git a/audio/utility/BUILD.gn b/audio/utility/BUILD.gn
index 76c09a5..11a65bd 100644
--- a/audio/utility/BUILD.gn
+++ b/audio/utility/BUILD.gn
@@ -21,7 +21,6 @@
   ]
 
   deps = [
-    "../..:webrtc_common",
     "../../api/audio:audio_frame_api",
     "../../rtc_base:checks",
     "../../rtc_base:rtc_base_approved",
diff --git a/common_audio/BUILD.gn b/common_audio/BUILD.gn
index 911c050..74b0c60 100644
--- a/common_audio/BUILD.gn
+++ b/common_audio/BUILD.gn
@@ -47,7 +47,6 @@
   deps = [
     ":common_audio_c",
     ":sinc_resampler",
-    "..:webrtc_common",
     "../rtc_base:checks",
     "../rtc_base:gtest_prod",
     "../rtc_base:rtc_base_approved",
@@ -182,7 +181,6 @@
   deps = [
     ":common_audio_c_arm_asm",
     ":common_audio_cc",
-    "..:webrtc_common",
     "../rtc_base:checks",
     "../rtc_base:compile_assert_c",
     "../rtc_base:rtc_base_approved",
@@ -202,7 +200,6 @@
   ]
 
   deps = [
-    "..:webrtc_common",
     "../rtc_base:rtc_base_approved",
     "../system_wrappers",
   ]
@@ -213,7 +210,6 @@
     "resampler/sinc_resampler.h",
   ]
   deps = [
-    "..:webrtc_common",
     "../rtc_base:gtest_prod",
     "../rtc_base:rtc_base_approved",
     "../rtc_base/memory:aligned_malloc",
@@ -382,7 +378,6 @@
       ":fir_filter",
       ":fir_filter_factory",
       ":sinc_resampler",
-      "..:webrtc_common",
       "../rtc_base:checks",
       "../rtc_base:rtc_base_approved",
       "../rtc_base:rtc_base_tests_utils",
diff --git a/common_types.h b/common_types.h
index b2fcf17..848b899 100644
--- a/common_types.h
+++ b/common_types.h
@@ -17,8 +17,9 @@
 #include "absl/strings/match.h"
 // TODO(sprang): Remove this include when all usage includes it directly.
 #include "api/video/video_bitrate_allocation.h"
+// TODO(bugs.webrtc.org/7660): Delete include once downstream code is updated.
+#include "api/video/video_codec_type.h"
 #include "rtc_base/checks.h"
-#include "rtc_base/deprecation.h"
 
 #if defined(_MSC_VER)
 // Disable "new behavior: elements of array will be default initialized"
@@ -38,29 +39,6 @@
   kVideoFrameDelta = 4,
 };
 
-// Statistics for an RTCP channel
-struct RtcpStatistics {
-  RtcpStatistics()
-      : fraction_lost(0),
-        packets_lost(0),
-        extended_highest_sequence_number(0),
-        jitter(0) {}
-
-  uint8_t fraction_lost;
-  int32_t packets_lost;  // Defined as a 24 bit signed integer in RTCP
-  uint32_t extended_highest_sequence_number;
-  uint32_t jitter;
-};
-
-class RtcpStatisticsCallback {
- public:
-  virtual ~RtcpStatisticsCallback() {}
-
-  virtual void StatisticsUpdated(const RtcpStatistics& statistics,
-                                 uint32_t ssrc) = 0;
-  virtual void CNameChanged(const char* cname, uint32_t ssrc) = 0;
-};
-
 // Statistics for RTCP packet types.
 struct RtcpPacketTypeCounter {
   RtcpPacketTypeCounter()
@@ -206,80 +184,6 @@
 // RTP
 enum { kRtpCsrcSize = 15 };  // RFC 3550 page 13
 
-// NETEQ statistics.
-struct NetworkStatistics {
-  // current jitter buffer size in ms
-  uint16_t currentBufferSize;
-  // preferred (optimal) buffer size in ms
-  uint16_t preferredBufferSize;
-  // adding extra delay due to "peaky jitter"
-  bool jitterPeaksFound;
-  // Stats below correspond to similarly-named fields in the WebRTC stats spec.
-  // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats
-  uint64_t totalSamplesReceived;
-  uint64_t concealedSamples;
-  uint64_t concealmentEvents;
-  uint64_t jitterBufferDelayMs;
-  // Stats below DO NOT correspond directly to anything in the WebRTC stats
-  // Loss rate (network + late); fraction between 0 and 1, scaled to Q14.
-  uint16_t currentPacketLossRate;
-  // Late loss rate; fraction between 0 and 1, scaled to Q14.
-  union {
-    RTC_DEPRECATED uint16_t currentDiscardRate;
-  };
-  // fraction (of original stream) of synthesized audio inserted through
-  // expansion (in Q14)
-  uint16_t currentExpandRate;
-  // fraction (of original stream) of synthesized speech inserted through
-  // expansion (in Q14)
-  uint16_t currentSpeechExpandRate;
-  // fraction of synthesized speech inserted through pre-emptive expansion
-  // (in Q14)
-  uint16_t currentPreemptiveRate;
-  // fraction of data removed through acceleration (in Q14)
-  uint16_t currentAccelerateRate;
-  // fraction of data coming from secondary decoding (in Q14)
-  uint16_t currentSecondaryDecodedRate;
-  // Fraction of secondary data, including FEC and RED, that is discarded (in
-  // Q14). Discarding of secondary data can be caused by the reception of the
-  // primary data, obsoleting the secondary data. It can also be caused by early
-  // or late arrival of secondary data.
-  uint16_t currentSecondaryDiscardedRate;
-  // clock-drift in parts-per-million (negative or positive)
-  int32_t clockDriftPPM;
-  // average packet waiting time in the jitter buffer (ms)
-  int meanWaitingTimeMs;
-  // median packet waiting time in the jitter buffer (ms)
-  int medianWaitingTimeMs;
-  // min packet waiting time in the jitter buffer (ms)
-  int minWaitingTimeMs;
-  // max packet waiting time in the jitter buffer (ms)
-  int maxWaitingTimeMs;
-  // added samples in off mode due to packet loss
-  size_t addedSamples;
-};
-
-// Statistics for calls to AudioCodingModule::PlayoutData10Ms().
-struct AudioDecodingCallStats {
-  AudioDecodingCallStats()
-      : calls_to_silence_generator(0),
-        calls_to_neteq(0),
-        decoded_normal(0),
-        decoded_plc(0),
-        decoded_cng(0),
-        decoded_plc_cng(0),
-        decoded_muted_output(0) {}
-
-  int calls_to_silence_generator;  // Number of calls where silence generated,
-                                   // and NetEq was disengaged from decoding.
-  int calls_to_neteq;              // Number of calls to NetEq.
-  int decoded_normal;  // Number of calls where audio RTP packet decoded.
-  int decoded_plc;     // Number of calls resulted in PLC.
-  int decoded_cng;  // Number of calls where comfort noise generated due to DTX.
-  int decoded_plc_cng;       // Number of calls resulted where PLC faded to CNG.
-  int decoded_muted_output;  // Number of calls returning a muted state output.
-};
-
 // ==================================================================
 // Video specific types
 // ==================================================================
@@ -318,18 +222,6 @@
 
 }  // namespace H264
 
-// Video codec types
-enum VideoCodecType {
-  // There are various memset(..., 0, ...) calls in the code that rely on
-  // kVideoCodecGeneric being zero.
-  kVideoCodecGeneric = 0,
-  kVideoCodecVP8,
-  kVideoCodecVP9,
-  kVideoCodecH264,
-  kVideoCodecI420,
-  kVideoCodecMultiplex,
-};
-
 struct SpatialLayer {
   bool operator==(const SpatialLayer& other) const;
   bool operator!=(const SpatialLayer& other) const { return !(*this == other); }
diff --git a/cras-config/aec_config.cc b/cras-config/aec_config.cc
index adbd453..d0cf19b 100644
--- a/cras-config/aec_config.cc
+++ b/cras-config/aec_config.cc
@@ -118,6 +118,8 @@
 		AEC_GET_FLOAT(ini, ERLE, MAX_H);
 	config->erle.onset_detection =
 		AEC_GET_INT(ini, ERLE, ONSET_DETECTION);
+	config->erle.num_sections =
+		AEC_GET_INT(ini, ERLE, NUM_SECTIONS);
 
 	config->ep_strength.lf =
 		AEC_GET_FLOAT(ini, EP_STRENGTH, LF);
diff --git a/cras-config/aec_config.h b/cras-config/aec_config.h
index 5318ac9..33185b3 100644
--- a/cras-config/aec_config.h
+++ b/cras-config/aec_config.h
@@ -120,6 +120,8 @@
 #define AEC_ERLE_MAX_H_VALUE 1.5f
 #define AEC_ERLE_ONSET_DETECTION "erle:onset_detection"
 #define AEC_ERLE_ONSET_DETECTION_VALUE 1
+#define AEC_ERLE_NUM_SECTIONS "erle:num_sections"
+#define AEC_ERLE_NUM_SECTIONS_VALUE 1
 
 // EpStrength
 #define AEC_EP_STRENGTH_LF "ep_strength:lf"
@@ -156,10 +158,10 @@
 #define AEC_ECHO_AUDIBILITY_AUDIBILITY_THRESHOLD_HF_VALUE 10
 #define AEC_ECHO_AUDIBILITY_USE_STATIONARY_PROPERTIES \
 	"echo_audibility:use_stationary_properties"
-#define AEC_ECHO_AUDIBILITY_USE_STATIONARY_PROPERTIES_VALUE 1
+#define AEC_ECHO_AUDIBILITY_USE_STATIONARY_PROPERTIES_VALUE 0
 #define AEC_ECHO_AUDIBILITY_USE_STATIONARITY_PROPERTIES_AT_INIT \
 	 "echo_audibility:use_stationarity_properties_at_init"
-#define AEC_ECHO_AUDIBILITY_USE_STATIONARITY_PROPERTIES_AT_INIT_VALUE 1
+#define AEC_ECHO_AUDIBILITY_USE_STATIONARITY_PROPERTIES_AT_INIT_VALUE 0
 
 // Rendering levels
 #define AEC_RENDER_LEVELS_ACTIVE_RENDER_LIMIT \
@@ -279,10 +281,10 @@
 
 #define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_ENR_THRESHOLD \
 	"suppressor.dominant_nearend_detection:enr_threshold"
-#define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_ENR_THRESHOLD_VALUE 4.f
+#define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_ENR_THRESHOLD_VALUE .25f
 #define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_ENR_EXIT_THRESHOLD \
 	"suppressor.dominant_nearend_detection:enr_exit_threshold"
-#define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_ENR_EXIT_THRESHOLD_VALUE .1f
+#define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_ENR_EXIT_THRESHOLD_VALUE 10.f
 #define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_SNR_THRESHOLD \
 	"suppressor.dominant_nearend_detection:snr_threshold"
 #define AEC_SUPPRESSOR_DOMINANT_NEAREND_DETECTION_SNR_THRESHOLD_VALUE 30.f
diff --git a/cras-config/apm_config.cc b/cras-config/apm_config.cc
index b03f8d5..407025b 100644
--- a/cras-config/apm_config.cc
+++ b/cras-config/apm_config.cc
@@ -18,11 +18,14 @@
 		ini, key,	\
 		key ## _VALUE)
 
+typedef webrtc::AudioProcessing::Config ApConfig;
+
 void apm_config_apply(dictionary *ini, webrtc::AudioProcessing *apm)
 {
-	webrtc::AudioProcessing::Config config;
+	ApConfig config;
 	webrtc::GainControl::Mode agc_mode;
 	webrtc::NoiseSuppression::Level ns_level;
+	int level_estimator;
 
 	if (ini == NULL)
 		return;
@@ -37,12 +40,19 @@
 			APM_GET_FLOAT(ini, APM_PRE_AMPLIFIER_FIXED_GAIN_FACTOR);
 	config.gain_controller2.enabled =
 			APM_GET_INT(ini, APM_GAIN_CONTROLLER2_ENABLED);
-	config.gain_controller2.adaptive_digital_mode =
-		APM_GET_INT(ini, APM_GAIN_CONTROLLER2_ADAPTIVE_DIGITAL_MODE);
-	config.gain_controller2.extra_saturation_margin_db =
-		APM_GET_FLOAT(ini, APM_GAIN_CONTROLLER2_EXTRA_SATURATION_MARGIN_DB);
-	config.gain_controller2.fixed_gain_db =
-		APM_GET_FLOAT(ini, APM_GAIN_CONTROLLER2_FIXED_GAIN_DB);
+	config.gain_controller2.adaptive_digital.enabled =
+		APM_GET_INT(ini, ADAPTIVE_DIGITAL_ENABLED);
+	config.gain_controller2.adaptive_digital.extra_saturation_margin_db =
+		APM_GET_FLOAT(ini, ADAPTIVE_DIGITAL_EXTRA_SATURATION_MARGIN_DB);
+	level_estimator = APM_GET_INT(
+		ini, ADAPTIVE_DIGITAL_LEVEL_ESTIMATOR);
+	config.gain_controller2.adaptive_digital.level_estimator =
+		static_cast<ApConfig::GainController2::LevelEstimator>(
+			level_estimator);
+	config.gain_controller2.adaptive_digital.use_saturation_protector =
+		APM_GET_INT(ini, ADAPTIVE_DIGITAL_USE_SATURATION_PROTECTOR);
+	config.gain_controller2.fixed_digital.gain_db =
+		APM_GET_FLOAT(ini, APM_GAIN_CONTROLLER2_FIXED_DIGITAL_GAIN_DB);
 	apm->ApplyConfig(config);
 
 	apm->gain_control()->set_compression_gain_db(
@@ -73,12 +83,17 @@
 		APM_GET_FLOAT(ini, APM_PRE_AMPLIFIER_FIXED_GAIN_FACTOR));
 	syslog(LOG_ERR, "gain_controller2_enabled %u",
 		APM_GET_INT(ini, APM_GAIN_CONTROLLER2_ENABLED));
-	syslog(LOG_ERR, "gain_controller2_adaptive_digital_mode %d",
-		APM_GET_INT(ini, APM_GAIN_CONTROLLER2_ADAPTIVE_DIGITAL_MODE));
-	syslog(LOG_ERR, "gain_controller2_extra_saturation_margin_db %f",
-		APM_GET_FLOAT(ini, APM_GAIN_CONTROLLER2_EXTRA_SATURATION_MARGIN_DB));
-	syslog(LOG_ERR, "gain_controller2_fixed_gain_db %f",
-		APM_GET_FLOAT(ini, APM_GAIN_CONTROLLER2_FIXED_GAIN_DB));
+	syslog(LOG_ERR, "adaptive_digital_enabled %d",
+		APM_GET_INT(ini, ADAPTIVE_DIGITAL_ENABLED));
+	syslog(LOG_ERR, "adaptive_digital_extra_saturation_margin_db %f",
+		APM_GET_FLOAT(ini,
+			ADAPTIVE_DIGITAL_EXTRA_SATURATION_MARGIN_DB));
+	syslog(LOG_ERR, "adaptive_digital_level_estimator %d",
+		APM_GET_INT(ini, ADAPTIVE_DIGITAL_LEVEL_ESTIMATOR));
+	syslog(LOG_ERR, "adaptive_digital_use_saturation_protector %d",
+		APM_GET_INT(ini, ADAPTIVE_DIGITAL_USE_SATURATION_PROTECTOR));
+	syslog(LOG_ERR, "gain_controller2_fixed_digital_gain_db %f",
+		APM_GET_FLOAT(ini, APM_GAIN_CONTROLLER2_FIXED_DIGITAL_GAIN_DB));
 	syslog(LOG_ERR, "gain_control_compression_gain_db %u",
 		APM_GET_INT(ini, APM_GAIN_CONTROL_COMPRESSION_GAIN_DB));
 	syslog(LOG_ERR, "gain_control_mode %u",
diff --git a/cras-config/apm_config.h b/cras-config/apm_config.h
index a223877..8c2f823 100644
--- a/cras-config/apm_config.h
+++ b/cras-config/apm_config.h
@@ -20,13 +20,23 @@
 #define APM_PRE_AMPLIFIER_FIXED_GAIN_FACTOR_VALUE 1.f
 #define APM_GAIN_CONTROLLER2_ENABLED "apm:gain_controller2_enabled"
 #define APM_GAIN_CONTROLLER2_ENABLED_VALUE 0
-#define APM_GAIN_CONTROLLER2_FIXED_GAIN_DB "apm:gain_controller2_fixed_gain_db"
-#define APM_GAIN_CONTROLLER2_FIXED_GAIN_DB_VALUE 0.f
-#define APM_GAIN_CONTROLLER2_ADAPTIVE_DIGITAL_MODE "apm:gain_controller2_adaptive_digital_mode"
-#define APM_GAIN_CONTROLLER2_ADAPTIVE_DIGITAL_MODE_VALUE 1
-#define APM_GAIN_CONTROLLER2_EXTRA_SATURATION_MARGIN_DB \
-	"apm:gain_controller2_extra_saturation_margin_db"
-#define APM_GAIN_CONTROLLER2_EXTRA_SATURATION_MARGIN_DB_VALUE 2.f
+#define APM_GAIN_CONTROLLER2_FIXED_DIGITAL_GAIN_DB \
+	"apm:gain_controller2_fixed_digital_gain_db"
+#define APM_GAIN_CONTROLLER2_FIXED_DIGITAL_GAIN_DB_VALUE 0.f
+
+/* Keys for AudioProcessing::GainController2 */
+#define ADAPTIVE_DIGITAL_ENABLED "apm:adaptive_digital_enabled"
+#define ADAPTIVE_DIGITAL_ENABLED_VALUE 0
+#define ADAPTIVE_DIGITAL_LEVEL_ESTIMATOR \
+	"apm:adaptive_digital_level_estimator"
+#define ADAPTIVE_DIGITAL_LEVEL_ESTIMATOR_VALUE 0
+#define ADAPTIVE_DIGITAL_EXTRA_SATURATION_MARGIN_DB \
+	"apm:adaptive_digital_extra_saturation_margin_db"
+#define ADAPTIVE_DIGITAL_EXTRA_SATURATION_MARGIN_DB_VALUE 2.f
+#define ADAPTIVE_DIGITAL_USE_SATURATION_PROTECTOR \
+	"apm:adaptive_digital_use_saturation_protector"
+#define ADAPTIVE_DIGITAL_USE_SATURATION_PROTECTOR_VALUE 1
+
 #define APM_GAIN_CONTROL_COMPRESSION_GAIN_DB "apm:gain_control_compression_gain_db"
 #define APM_GAIN_CONTROL_COMPRESSION_GAIN_DB_VALUE 9
 /* 0: adaptive analog, 1: adaptive digital, 2: fixed digital */
diff --git a/modules/audio_coding/BUILD.gn b/modules/audio_coding/BUILD.gn
index ec81697..df4ba23 100644
--- a/modules/audio_coding/BUILD.gn
+++ b/modules/audio_coding/BUILD.gn
@@ -14,31 +14,6 @@
 
 visibility = [ ":*" ]
 
-audio_codec_deps = [
-  ":g711",
-  ":pcm16b",
-]
-if (rtc_include_ilbc) {
-  audio_codec_deps += [ ":ilbc" ]
-}
-if (rtc_include_opus) {
-  audio_codec_deps += [ ":webrtc_opus" ]
-}
-if (current_cpu == "arm") {
-  audio_codec_deps += [ ":isac_fix" ]
-} else {
-  audio_codec_deps += [ ":isac" ]
-}
-audio_codec_deps += [ ":g722" ]
-if (!build_with_mozilla && !build_with_chromium) {
-  audio_codec_deps += [ ":red" ]
-}
-audio_coding_deps = audio_codec_deps + [
-                      "../..:webrtc_common",
-                      "../../common_audio",
-                      "../../system_wrappers",
-                    ]
-
 rtc_static_library("audio_format_conversion") {
   visibility += webrtc_default_visibility
   sources = [
@@ -63,7 +38,6 @@
   # TODO(bugs.webrtc.org/9808): Move to private visibility as soon as that
   # client code gets updated.
   visibility += [ "*" ]
-  allow_poison = [ "audio_codecs" ]
 
   sources = [
     "acm2/acm_codec_database.cc",
@@ -72,22 +46,18 @@
     "acm2/rent_a_codec.h",
   ]
   deps = [
-           "../../rtc_base:checks",
-           "../../api:array_view",
-           "//third_party/abseil-cpp/absl/strings",
-           "//third_party/abseil-cpp/absl/types:optional",
-           "../../api/audio_codecs:audio_codecs_api",
-           "../..:webrtc_common",
-           "../../rtc_base:protobuf_utils",
-           "../../rtc_base:rtc_base_approved",
-           "../../system_wrappers",
-           ":audio_coding_module_typedefs",
-           ":isac_common",
-           ":isac_fix_c",
-           ":audio_encoder_cng",
-           ":neteq_decoder_enum",
-         ] + audio_codec_deps
-
+    ":audio_coding_module_typedefs",
+    ":neteq_decoder_enum",
+    "../..:webrtc_common",
+    "../../api:array_view",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../rtc_base:checks",
+    "../../rtc_base:protobuf_utils",
+    "../../rtc_base:rtc_base_approved",
+    "../../system_wrappers",
+    "//third_party/abseil-cpp/absl/strings",
+    "//third_party/abseil-cpp/absl/types:optional",
+  ]
   defines = audio_codec_defines
 }
 
@@ -97,12 +67,12 @@
   ]
   deps = [
     "../..:webrtc_common",
+    "../../rtc_base:deprecation",
   ]
 }
 
 rtc_static_library("audio_coding") {
   visibility += [ "*" ]
-  allow_poison = [ "audio_codecs" ]  # TODO(bugs.webrtc.org/8396): Remove.
   sources = [
     "acm2/acm_receiver.cc",
     "acm2/acm_receiver.h",
@@ -111,40 +81,34 @@
     "acm2/audio_coding_module.cc",
     "acm2/call_statistics.cc",
     "acm2/call_statistics.h",
-    "acm2/codec_manager.cc",
-    "acm2/codec_manager.h",
     "include/audio_coding_module.h",
   ]
 
   defines = []
 
-  if (rtc_include_opus) {
-    public_deps = [
-      ":webrtc_opus",
-    ]
-  }
-
-  deps = audio_coding_deps + [
-           "../../system_wrappers:metrics",
-           "../../api/audio:audio_frame_api",
-           "..:module_api",
-           "..:module_api_public",
-           "../../common_audio:common_audio_c",
-           "../../rtc_base:deprecation",
-           "../../rtc_base:checks",
-           "../../api:array_view",
-           "../../api/audio_codecs:audio_codecs_api",
-           ":audio_coding_module_typedefs",
-           ":neteq",
-           ":neteq_decoder_enum",
-           ":rent_a_codec",
-           "../../rtc_base:audio_format_to_string",
-           "../../rtc_base:rtc_base_approved",
-           "//third_party/abseil-cpp/absl/strings",
-           "//third_party/abseil-cpp/absl/types:optional",
-           "../../logging:rtc_event_log_api",
-         ]
-  defines = audio_coding_defines
+  deps = [
+    ":audio_coding_module_typedefs",
+    ":neteq",
+    ":neteq_decoder_enum",
+    ":rent_a_codec",
+    "..:module_api",
+    "..:module_api_public",
+    "../..:webrtc_common",
+    "../../api:array_view",
+    "../../api/audio:audio_frame_api",
+    "../../api/audio_codecs:audio_codecs_api",
+    "../../common_audio:common_audio",
+    "../../common_audio:common_audio_c",
+    "../../logging:rtc_event_log_api",
+    "../../rtc_base:audio_format_to_string",
+    "../../rtc_base:checks",
+    "../../rtc_base:deprecation",
+    "../../rtc_base:rtc_base_approved",
+    "../../system_wrappers",
+    "../../system_wrappers:metrics",
+    "//third_party/abseil-cpp/absl/strings",
+    "//third_party/abseil-cpp/absl/types:optional",
+  ]
 }
 
 rtc_static_library("legacy_encoded_audio_frame") {
@@ -910,7 +874,7 @@
     proto_out_dir = "modules/audio_coding/audio_network_adaptor"
   }
   proto_library("ana_config_proto") {
-    visibility += webrtc_default_visibility
+    visibility += [ "*" ]
     sources = [
       "audio_network_adaptor/config.proto",
     ]
@@ -1061,8 +1025,6 @@
     "neteq/random_vector.h",
     "neteq/red_payload_splitter.cc",
     "neteq/red_payload_splitter.h",
-    "neteq/rtcp.cc",
-    "neteq/rtcp.h",
     "neteq/statistics_calculator.cc",
     "neteq/statistics_calculator.h",
     "neteq/sync_buffer.cc",
@@ -1172,7 +1134,6 @@
     "../../rtc_base:checks",
     "../../rtc_base:rtc_base",
     "../../rtc_base:rtc_base_approved",
-    "../../rtc_base:rtc_base_tests_utils",
     "../../rtc_base/system:arch",
     "../../test:rtp_test_utils",
     "../rtp_rtcp",
@@ -1280,6 +1241,30 @@
 }
 
 if (rtc_include_tests) {
+  audio_coding_deps = [
+    "../../common_audio",
+    "../../system_wrappers",
+    "../..:webrtc_common",
+    ":audio_encoder_cng",
+    ":g711",
+    ":g722",
+    ":pcm16b",
+  ]
+  if (rtc_include_ilbc) {
+    audio_coding_deps += [ ":ilbc" ]
+  }
+  if (rtc_include_opus) {
+    audio_coding_deps += [ ":webrtc_opus" ]
+  }
+  if (current_cpu == "arm") {
+    audio_coding_deps += [ ":isac_fix" ]
+  } else {
+    audio_coding_deps += [ ":isac" ]
+  }
+  if (!build_with_mozilla && !build_with_chromium) {
+    audio_coding_deps += [ ":red" ]
+  }
+
   rtc_source_set("mocks") {
     testonly = true
     sources = [
@@ -1368,6 +1353,7 @@
       ":audio_format_conversion",
       ":pcm16b_c",
       ":red",
+      ":webrtc_opus_c",
       "..:module_api",
       "../..:webrtc_common",
       "../../api/audio:audio_frame_api",
@@ -2007,8 +1993,6 @@
       "acm2/acm_receiver_unittest.cc",
       "acm2/audio_coding_module_unittest.cc",
       "acm2/call_statistics_unittest.cc",
-      "acm2/codec_manager_unittest.cc",
-      "acm2/rent_a_codec_unittest.cc",
       "audio_network_adaptor/audio_network_adaptor_impl_unittest.cc",
       "audio_network_adaptor/bitrate_controller_unittest.cc",
       "audio_network_adaptor/channel_controller_unittest.cc",
@@ -2119,6 +2103,7 @@
       "../../logging:mocks",
       "../../logging:rtc_event_audio",
       "../../logging:rtc_event_log_api",
+      "../../modules/rtp_rtcp:rtp_rtcp_format",
       "../../rtc_base:checks",
       "../../rtc_base:protobuf_utils",
       "../../rtc_base:rtc_base",
diff --git a/modules/audio_coding/acm2/acm_codec_database.cc b/modules/audio_coding/acm2/acm_codec_database.cc
index 879082c..cada80c 100644
--- a/modules/audio_coding/acm2/acm_codec_database.cc
+++ b/modules/audio_coding/acm2/acm_codec_database.cc
@@ -298,7 +298,7 @@
 int ACMCodecDB::CodecId(const char* payload_name,
                         int frequency,
                         size_t channels) {
-  for (const CodecInst& ci : RentACodec::Database()) {
+  for (const CodecInst& ci : database_) {
     bool name_match = false;
     bool frequency_match = false;
     bool channels_match = false;
@@ -318,7 +318,7 @@
 
     if (name_match && frequency_match && channels_match) {
       // We have found a matching codec in the list.
-      return &ci - RentACodec::Database().data();
+      return &ci - database_;
     }
   }
 
diff --git a/modules/audio_coding/acm2/acm_receiver.cc b/modules/audio_coding/acm2/acm_receiver.cc
index 3411d90..d3af7c0 100644
--- a/modules/audio_coding/acm2/acm_receiver.cc
+++ b/modules/audio_coding/acm2/acm_receiver.cc
@@ -18,7 +18,6 @@
 #include "absl/strings/match.h"
 #include "api/audio/audio_frame.h"
 #include "api/audio_codecs/audio_decoder.h"
-#include "common_types.h"
 #include "modules/audio_coding/acm2/acm_resampler.h"
 #include "modules/audio_coding/acm2/call_statistics.h"
 #include "modules/audio_coding/acm2/rent_a_codec.h"
@@ -346,6 +345,13 @@
   acm_stat->concealedSamples = neteq_lifetime_stat.concealed_samples;
   acm_stat->concealmentEvents = neteq_lifetime_stat.concealment_events;
   acm_stat->jitterBufferDelayMs = neteq_lifetime_stat.jitter_buffer_delay_ms;
+  acm_stat->delayedPacketOutageSamples =
+      neteq_lifetime_stat.delayed_packet_outage_samples;
+
+  NetEqOperationsAndState neteq_operations_and_state =
+      neteq_->GetOperationsAndState();
+  acm_stat->packetBufferFlushes =
+      neteq_operations_and_state.packet_buffer_flushes;
 }
 
 int AcmReceiver::DecoderByPayloadType(uint8_t payload_type,
diff --git a/modules/audio_coding/acm2/audio_coding_module.cc b/modules/audio_coding/acm2/audio_coding_module.cc
index 334c0e0..c0aab3a 100644
--- a/modules/audio_coding/acm2/audio_coding_module.cc
+++ b/modules/audio_coding/acm2/audio_coding_module.cc
@@ -18,7 +18,6 @@
 #include "api/array_view.h"
 #include "modules/audio_coding/acm2/acm_receiver.h"
 #include "modules/audio_coding/acm2/acm_resampler.h"
-#include "modules/audio_coding/acm2/codec_manager.h"
 #include "modules/audio_coding/acm2/rent_a_codec.h"
 #include "modules/include/module_common_types.h"
 #include "modules/include/module_common_types_public.h"
@@ -34,12 +33,6 @@
 
 namespace {
 
-struct EncoderFactory {
-  AudioEncoder* external_speech_encoder = nullptr;
-  acm2::CodecManager codec_manager;
-  acm2::RentACodec rent_a_codec;
-};
-
 class AudioCodingModuleImpl final : public AudioCodingModule {
  public:
   explicit AudioCodingModuleImpl(const AudioCodingModule::Config& config);
@@ -49,12 +42,6 @@
   //   Sender
   //
 
-  // Can be called multiple times for Codec, CNG, RED.
-  int RegisterSendCodec(const CodecInst& send_codec) override;
-
-  void RegisterExternalSendCodec(
-      AudioEncoder* external_speech_encoder) override;
-
   void ModifyEncoder(rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)>
                          modifier) override;
 
@@ -74,25 +61,9 @@
   int Add10MsData(const AudioFrame& audio_frame) override;
 
   /////////////////////////////////////////
-  // (RED) Redundant Coding
-  //
-
-  // Configure RED status i.e. on/off.
-  int SetREDStatus(bool enable_red) override;
-
-  // Get RED status.
-  bool REDStatus() const override;
-
-  /////////////////////////////////////////
   // (FEC) Forward Error Correction (codec internal)
   //
 
-  // Configure FEC status i.e. on/off.
-  int SetCodecFEC(bool enabled_codec_fec) override;
-
-  // Get FEC status.
-  bool CodecFEC() const override;
-
   // Set target packet loss rate
   int SetPacketLossRate(int loss_rate) override;
 
@@ -102,14 +73,6 @@
   //   (CNG) Comfort Noise Generation
   //
 
-  int SetVAD(bool enable_dtx = true,
-             bool enable_vad = false,
-             ACMVADMode mode = VADNormal) override;
-
-  int VAD(bool* dtx_enabled,
-          bool* vad_enabled,
-          ACMVADMode* mode) const override;
-
   int RegisterVADCallback(ACMVADCallback* vad_callback) override;
 
   /////////////////////////////////////////
@@ -130,11 +93,6 @@
   bool RegisterReceiveCodec(int rtp_payload_type,
                             const SdpAudioFormat& audio_format) override;
 
-  int RegisterReceiveCodec(const CodecInst& receive_codec) override;
-  int RegisterReceiveCodec(
-      const CodecInst& receive_codec,
-      rtc::FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory) override;
-
   int RegisterExternalReceiveCodec(int rtp_payload_type,
                                    AudioDecoder* external_decoder,
                                    int sample_rate_hz,
@@ -222,11 +180,6 @@
     const std::string histogram_name_;
   };
 
-  int RegisterReceiveCodecUnlocked(
-      const CodecInst& codec,
-      rtc::FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory)
-      RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
-
   int Add10MsDataInternal(const AudioFrame& audio_frame, InputData* input_data)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(acm_crit_sect_);
   int Encode(const InputData& input_data)
@@ -264,12 +217,7 @@
   acm2::AcmReceiver receiver_;  // AcmReceiver has it's own internal lock.
   ChangeLogger bitrate_logger_ RTC_GUARDED_BY(acm_crit_sect_);
 
-  std::unique_ptr<EncoderFactory> encoder_factory_
-      RTC_GUARDED_BY(acm_crit_sect_);
-
-  // Current encoder stack, either obtained from
-  // encoder_factory_->rent_a_codec.RentEncoderStack or provided by a call to
-  // RegisterEncoder.
+  // Current encoder stack, provided by a call to RegisterEncoder.
   std::unique_ptr<AudioEncoder> encoder_stack_ RTC_GUARDED_BY(acm_crit_sect_);
 
   std::unique_ptr<AudioDecoder> isac_decoder_16k_
@@ -405,28 +353,6 @@
   AudioEncoder* enc_;
 };
 
-// Return false on error.
-bool CreateSpeechEncoderIfNecessary(EncoderFactory* ef) {
-  auto* sp = ef->codec_manager.GetStackParams();
-  if (sp->speech_encoder) {
-    // Do nothing; we already have a speech encoder.
-  } else if (ef->codec_manager.GetCodecInst()) {
-    RTC_DCHECK(!ef->external_speech_encoder);
-    // We have no speech encoder, but we have a specification for making one.
-    std::unique_ptr<AudioEncoder> enc =
-        ef->rent_a_codec.RentEncoder(*ef->codec_manager.GetCodecInst());
-    if (!enc)
-      return false;  // Encoder spec was bad.
-    sp->speech_encoder = std::move(enc);
-  } else if (ef->external_speech_encoder) {
-    RTC_DCHECK(!ef->codec_manager.GetCodecInst());
-    // We have an external speech encoder.
-    sp->speech_encoder = std::unique_ptr<AudioEncoder>(
-        new RawAudioEncoderWrapper(ef->external_speech_encoder));
-  }
-  return true;
-}
-
 void AudioCodingModuleImpl::ChangeLogger::MaybeLog(int value) {
   if (value != last_value_ || first_time_) {
     first_time_ = false;
@@ -441,7 +367,6 @@
       expected_in_ts_(0xD87F3F9F),
       receiver_(config),
       bitrate_logger_("WebRTC.Audio.TargetBitrateInKbps"),
-      encoder_factory_(new EncoderFactory),
       encoder_stack_(nullptr),
       previous_pltype_(255),
       receiver_initialized_(false),
@@ -549,69 +474,29 @@
 //   Sender
 //
 
-// Can be called multiple times for Codec, CNG, RED.
-int AudioCodingModuleImpl::RegisterSendCodec(const CodecInst& send_codec) {
-  rtc::CritScope lock(&acm_crit_sect_);
-  if (!encoder_factory_->codec_manager.RegisterEncoder(send_codec)) {
-    return -1;
-  }
-  if (encoder_factory_->codec_manager.GetCodecInst()) {
-    encoder_factory_->external_speech_encoder = nullptr;
-  }
-  if (!CreateSpeechEncoderIfNecessary(encoder_factory_.get())) {
-    return -1;
-  }
-  auto* sp = encoder_factory_->codec_manager.GetStackParams();
-  if (sp->speech_encoder)
-    encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
-  return 0;
-}
-
-void AudioCodingModuleImpl::RegisterExternalSendCodec(
-    AudioEncoder* external_speech_encoder) {
-  rtc::CritScope lock(&acm_crit_sect_);
-  encoder_factory_->codec_manager.UnsetCodecInst();
-  encoder_factory_->external_speech_encoder = external_speech_encoder;
-  RTC_CHECK(CreateSpeechEncoderIfNecessary(encoder_factory_.get()));
-  auto* sp = encoder_factory_->codec_manager.GetStackParams();
-  RTC_CHECK(sp->speech_encoder);
-  encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
-}
-
 void AudioCodingModuleImpl::ModifyEncoder(
     rtc::FunctionView<void(std::unique_ptr<AudioEncoder>*)> modifier) {
   rtc::CritScope lock(&acm_crit_sect_);
-
-  // Wipe the encoder factory, so that everything that relies on it will fail.
-  // We don't want the complexity of supporting swapping back and forth.
-  if (encoder_factory_) {
-    encoder_factory_.reset();
-    RTC_CHECK(!encoder_stack_);  // Ensure we hadn't started using the factory.
-  }
-
   modifier(&encoder_stack_);
 }
 
 // Get current send codec.
 absl::optional<CodecInst> AudioCodingModuleImpl::SendCodec() const {
   rtc::CritScope lock(&acm_crit_sect_);
-  if (encoder_factory_) {
-    auto* ci = encoder_factory_->codec_manager.GetCodecInst();
-    if (ci) {
-      return *ci;
-    }
-    CreateSpeechEncoderIfNecessary(encoder_factory_.get());
-    const std::unique_ptr<AudioEncoder>& enc =
-        encoder_factory_->codec_manager.GetStackParams()->speech_encoder;
-    if (enc) {
-      return acm2::CodecManager::ForgeCodecInst(enc.get());
-    }
-    return absl::nullopt;
+  if (encoder_stack_) {
+    CodecInst ci;
+    ci.channels = encoder_stack_->NumChannels();
+    ci.plfreq = encoder_stack_->SampleRateHz();
+    ci.pacsize = rtc::CheckedDivExact(
+        static_cast<int>(encoder_stack_->Max10MsFramesInAPacket() * ci.plfreq),
+        100);
+    ci.pltype = -1;  // Not valid.
+    ci.rate = -1;    // Not valid.
+    static const char kName[] = "external";
+    memcpy(ci.plname, kName, sizeof(kName));
+    return ci;
   } else {
-    return encoder_stack_
-               ? absl::optional<CodecInst>(
-                     acm2::CodecManager::ForgeCodecInst(encoder_stack_.get()))
-               : absl::nullopt;
+    return absl::nullopt;
   }
 }
 
@@ -809,58 +694,9 @@
 }
 
 /////////////////////////////////////////
-//   (RED) Redundant Coding
-//
-
-bool AudioCodingModuleImpl::REDStatus() const {
-  rtc::CritScope lock(&acm_crit_sect_);
-  return encoder_factory_->codec_manager.GetStackParams()->use_red;
-}
-
-// Configure RED status i.e on/off.
-int AudioCodingModuleImpl::SetREDStatus(bool enable_red) {
-#ifdef WEBRTC_CODEC_RED
-  rtc::CritScope lock(&acm_crit_sect_);
-  CreateSpeechEncoderIfNecessary(encoder_factory_.get());
-  if (!encoder_factory_->codec_manager.SetCopyRed(enable_red)) {
-    return -1;
-  }
-  auto* sp = encoder_factory_->codec_manager.GetStackParams();
-  if (sp->speech_encoder)
-    encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
-  return 0;
-#else
-  RTC_LOG(LS_WARNING) << "  WEBRTC_CODEC_RED is undefined";
-  return -1;
-#endif
-}
-
-/////////////////////////////////////////
 //   (FEC) Forward Error Correction (codec internal)
 //
 
-bool AudioCodingModuleImpl::CodecFEC() const {
-  rtc::CritScope lock(&acm_crit_sect_);
-  return encoder_factory_->codec_manager.GetStackParams()->use_codec_fec;
-}
-
-int AudioCodingModuleImpl::SetCodecFEC(bool enable_codec_fec) {
-  rtc::CritScope lock(&acm_crit_sect_);
-  CreateSpeechEncoderIfNecessary(encoder_factory_.get());
-  if (!encoder_factory_->codec_manager.SetCodecFEC(enable_codec_fec)) {
-    return -1;
-  }
-  auto* sp = encoder_factory_->codec_manager.GetStackParams();
-  if (sp->speech_encoder)
-    encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
-  if (enable_codec_fec) {
-    return sp->use_codec_fec ? 0 : -1;
-  } else {
-    RTC_DCHECK(!sp->use_codec_fec);
-    return 0;
-  }
-}
-
 int AudioCodingModuleImpl::SetPacketLossRate(int loss_rate) {
   rtc::CritScope lock(&acm_crit_sect_);
   if (HaveValidEncoder("SetPacketLossRate")) {
@@ -870,36 +706,6 @@
 }
 
 /////////////////////////////////////////
-//   (VAD) Voice Activity Detection
-//
-int AudioCodingModuleImpl::SetVAD(bool enable_dtx,
-                                  bool enable_vad,
-                                  ACMVADMode mode) {
-  // Note: |enable_vad| is not used; VAD is enabled based on the DTX setting.
-  RTC_DCHECK_EQ(enable_dtx, enable_vad);
-  rtc::CritScope lock(&acm_crit_sect_);
-  CreateSpeechEncoderIfNecessary(encoder_factory_.get());
-  if (!encoder_factory_->codec_manager.SetVAD(enable_dtx, mode)) {
-    return -1;
-  }
-  auto* sp = encoder_factory_->codec_manager.GetStackParams();
-  if (sp->speech_encoder)
-    encoder_stack_ = encoder_factory_->rent_a_codec.RentEncoderStack(sp);
-  return 0;
-}
-
-// Get VAD/DTX settings.
-int AudioCodingModuleImpl::VAD(bool* dtx_enabled,
-                               bool* vad_enabled,
-                               ACMVADMode* mode) const {
-  rtc::CritScope lock(&acm_crit_sect_);
-  const auto* sp = encoder_factory_->codec_manager.GetStackParams();
-  *dtx_enabled = *vad_enabled = sp->use_cng;
-  *mode = sp->vad_mode;
-  return 0;
-}
-
-/////////////////////////////////////////
 //   Receiver
 //
 
@@ -957,59 +763,6 @@
   return receiver_.AddCodec(rtp_payload_type, audio_format);
 }
 
-int AudioCodingModuleImpl::RegisterReceiveCodec(const CodecInst& codec) {
-  rtc::CritScope lock(&acm_crit_sect_);
-  auto* ef = encoder_factory_.get();
-  return RegisterReceiveCodecUnlocked(
-      codec, [&] { return ef->rent_a_codec.RentIsacDecoder(codec.plfreq); });
-}
-
-int AudioCodingModuleImpl::RegisterReceiveCodec(
-    const CodecInst& codec,
-    rtc::FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory) {
-  rtc::CritScope lock(&acm_crit_sect_);
-  return RegisterReceiveCodecUnlocked(codec, isac_factory);
-}
-
-int AudioCodingModuleImpl::RegisterReceiveCodecUnlocked(
-    const CodecInst& codec,
-    rtc::FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory) {
-  RTC_DCHECK(receiver_initialized_);
-  if (codec.channels > 2) {
-    RTC_LOG_F(LS_ERROR) << "Unsupported number of channels: " << codec.channels;
-    return -1;
-  }
-
-  auto codec_id = acm2::RentACodec::CodecIdByParams(codec.plname, codec.plfreq,
-                                                    codec.channels);
-  if (!codec_id) {
-    RTC_LOG_F(LS_ERROR)
-        << "Wrong codec params to be registered as receive codec";
-    return -1;
-  }
-  auto codec_index = acm2::RentACodec::CodecIndexFromId(*codec_id);
-  RTC_CHECK(codec_index) << "Invalid codec ID: " << static_cast<int>(*codec_id);
-
-  // Check if the payload-type is valid.
-  if (!acm2::RentACodec::IsPayloadTypeValid(codec.pltype)) {
-    RTC_LOG_F(LS_ERROR) << "Invalid payload type " << codec.pltype << " for "
-                        << codec.plname;
-    return -1;
-  }
-
-  AudioDecoder* isac_decoder = nullptr;
-  if (absl::EqualsIgnoreCase(codec.plname, "isac")) {
-    std::unique_ptr<AudioDecoder>& saved_isac_decoder =
-        codec.plfreq == 16000 ? isac_decoder_16k_ : isac_decoder_32k_;
-    if (!saved_isac_decoder) {
-      saved_isac_decoder = isac_factory();
-    }
-    isac_decoder = saved_isac_decoder.get();
-  }
-  return receiver_.AddCodec(*codec_index, codec.pltype, codec.channels,
-                            codec.plfreq, isac_decoder, codec.plname);
-}
-
 int AudioCodingModuleImpl::RegisterExternalReceiveCodec(
     int rtp_payload_type,
     AudioDecoder* external_decoder,
diff --git a/modules/audio_coding/acm2/audio_coding_module_unittest.cc b/modules/audio_coding/acm2/audio_coding_module_unittest.cc
index b227cfb..4e262f7 100644
--- a/modules/audio_coding/acm2/audio_coding_module_unittest.cc
+++ b/modules/audio_coding/acm2/audio_coding_module_unittest.cc
@@ -1183,14 +1183,14 @@
 
     // Extract and verify the audio checksum.
     std::string checksum_string = audio_checksum.Finish();
-    EXPECT_EQ(audio_checksum_ref, checksum_string);
+    ExpectChecksumEq(audio_checksum_ref, checksum_string);
 
     // Extract and verify the payload checksum.
     rtc::Buffer checksum_result(payload_checksum_->Size());
     payload_checksum_->Finish(checksum_result.data(), checksum_result.size());
     checksum_string =
         rtc::hex_encode(checksum_result.data<char>(), checksum_result.size());
-    EXPECT_EQ(payload_checksum_ref, checksum_string);
+    ExpectChecksumEq(payload_checksum_ref, checksum_string);
 
     // Verify number of packets produced.
     EXPECT_EQ(expected_packets, packet_count_);
@@ -1199,6 +1199,18 @@
     remove(output_file_name.c_str());
   }
 
+  // Helper: result must be one of the "|"-separated checksums.
+  void ExpectChecksumEq(std::string ref, std::string result) {
+    if (ref.size() == result.size()) {
+      // Only one checksum: clearer message.
+      EXPECT_EQ(ref, result);
+    } else {
+      EXPECT_NE(ref.find(result), std::string::npos)
+          << result << " must be one of these:\n"
+          << ref;
+    }
+  }
+
   // Inherited from test::PacketSource.
   std::unique_ptr<test::Packet> NextPacket() override {
     auto packet = send_test_->NextPacket();
@@ -1436,21 +1448,35 @@
       50, test::AcmReceiveTestOldApi::kStereoOutput);
 }
 
+namespace {
+// Checksum depends on libopus being compiled with or without SSE.
+const std::string audio_maybe_sse =
+    "3e285b74510e62062fbd8142dacd16e9|"
+    "fd5d57d6d766908e6a7211e2a5c7f78a";
+const std::string payload_maybe_sse =
+    "78cf8f03157358acdc69f6835caa0d9b|"
+    "b693bd95c2ee2354f92340dd09e9da68";
+// Common checksums.
+const std::string audio_checksum =
+    AcmReceiverBitExactnessOldApi::PlatformChecksum(
+        audio_maybe_sse,
+        audio_maybe_sse,
+        "439e97ad1932c49923b5da029c17dd5e",
+        "038ec90f5f3fc2320f3090f8ecef6bb7",
+        "038ec90f5f3fc2320f3090f8ecef6bb7");
+const std::string payload_checksum =
+    AcmReceiverBitExactnessOldApi::PlatformChecksum(
+        payload_maybe_sse,
+        payload_maybe_sse,
+        "ab88b1a049c36bdfeb7e8b057ef6982a",
+        "27fef7b799393347ec3b5694369a1c36",
+        "27fef7b799393347ec3b5694369a1c36");
+}  // namespace
+
 TEST_F(AcmSenderBitExactnessOldApi, Opus_stereo_20ms) {
   ASSERT_NO_FATAL_FAILURE(SetUpTest("opus", 48000, 2, 120, 960, 960));
-  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
-          "3e285b74510e62062fbd8142dacd16e9",
-          "3e285b74510e62062fbd8142dacd16e9",
-          "439e97ad1932c49923b5da029c17dd5e",
-          "038ec90f5f3fc2320f3090f8ecef6bb7",
-          "038ec90f5f3fc2320f3090f8ecef6bb7"),
-      AcmReceiverBitExactnessOldApi::PlatformChecksum(
-          "78cf8f03157358acdc69f6835caa0d9b",
-          "78cf8f03157358acdc69f6835caa0d9b",
-          "ab88b1a049c36bdfeb7e8b057ef6982a",
-          "27fef7b799393347ec3b5694369a1c36",
-          "27fef7b799393347ec3b5694369a1c36"),
-      50, test::AcmReceiveTestOldApi::kStereoOutput);
+  Run(audio_checksum, payload_checksum, 50,
+      test::AcmReceiveTestOldApi::kStereoOutput);
 }
 
 TEST_F(AcmSenderBitExactnessNewApi, MAYBE_OpusFromFormat_stereo_20ms) {
@@ -1458,19 +1484,8 @@
       SdpAudioFormat("opus", 48000, 2, {{"stereo", "1"}}));
   ASSERT_NO_FATAL_FAILURE(SetUpTestExternalEncoder(
       AudioEncoderOpus::MakeAudioEncoder(*config, 120), 120));
-  Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
-          "3e285b74510e62062fbd8142dacd16e9",
-          "3e285b74510e62062fbd8142dacd16e9",
-          "439e97ad1932c49923b5da029c17dd5e",
-          "038ec90f5f3fc2320f3090f8ecef6bb7",
-          "038ec90f5f3fc2320f3090f8ecef6bb7"),
-      AcmReceiverBitExactnessOldApi::PlatformChecksum(
-          "78cf8f03157358acdc69f6835caa0d9b",
-          "78cf8f03157358acdc69f6835caa0d9b",
-          "ab88b1a049c36bdfeb7e8b057ef6982a",
-          "27fef7b799393347ec3b5694369a1c36",
-          "27fef7b799393347ec3b5694369a1c36"),
-      50, test::AcmReceiveTestOldApi::kStereoOutput);
+  Run(audio_checksum, payload_checksum, 50,
+      test::AcmReceiveTestOldApi::kStereoOutput);
 }
 
 TEST_F(AcmSenderBitExactnessNewApi, OpusFromFormat_stereo_20ms_voip) {
@@ -1480,15 +1495,19 @@
   config->application = AudioEncoderOpusConfig::ApplicationMode::kVoip;
   ASSERT_NO_FATAL_FAILURE(SetUpTestExternalEncoder(
       AudioEncoderOpus::MakeAudioEncoder(*config, 120), 120));
+  // Checksum depends on libopus being compiled with or without SSE.
+  const std::string audio_maybe_sse =
+      "b0325df4e8104f04e03af23c0b75800e|"
+      "3cd4e1bc2acd9440bb9e97af34080ffc";
+  const std::string payload_maybe_sse =
+      "4eab2259b6fe24c22dd242a113e0b3d9|"
+      "4fc0af0aa06c26454af09832d3ec1b4e";
   Run(AcmReceiverBitExactnessOldApi::PlatformChecksum(
-          "b0325df4e8104f04e03af23c0b75800e",
-          "b0325df4e8104f04e03af23c0b75800e",
-          "1c81121f5d9286a5a865d01dbab22ce8",
+          audio_maybe_sse, audio_maybe_sse, "1c81121f5d9286a5a865d01dbab22ce8",
           "11d547f89142e9ef03f37d7ca7f32379",
           "11d547f89142e9ef03f37d7ca7f32379"),
       AcmReceiverBitExactnessOldApi::PlatformChecksum(
-          "4eab2259b6fe24c22dd242a113e0b3d9",
-          "4eab2259b6fe24c22dd242a113e0b3d9",
+          payload_maybe_sse, payload_maybe_sse,
           "839ea60399447268ee0f0262a50b75fd",
           "1815fd5589cad0c6f6cf946c76b81aeb",
           "1815fd5589cad0c6f6cf946c76b81aeb"),
diff --git a/modules/audio_coding/acm2/call_statistics.h b/modules/audio_coding/acm2/call_statistics.h
index 9dced64..5d94ac4 100644
--- a/modules/audio_coding/acm2/call_statistics.h
+++ b/modules/audio_coding/acm2/call_statistics.h
@@ -12,7 +12,7 @@
 #define MODULES_AUDIO_CODING_ACM2_CALL_STATISTICS_H_
 
 #include "api/audio/audio_frame.h"
-#include "common_types.h"  // NOLINT(build/include)
+#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
 
 //
 // This class is for book keeping of calls to ACM. It is not useful to log API
diff --git a/modules/audio_coding/acm2/codec_manager.cc b/modules/audio_coding/acm2/codec_manager.cc
deleted file mode 100644
index eda6555..0000000
--- a/modules/audio_coding/acm2/codec_manager.cc
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "modules/audio_coding/acm2/codec_manager.h"
-
-#include <string.h>
-#include <map>
-#include <memory>
-#include <utility>
-
-#include "absl/strings/match.h"
-#include "api/array_view.h"
-#include "api/audio_codecs/audio_encoder.h"
-#include "modules/audio_coding/acm2/rent_a_codec.h"
-#include "rtc_base/checks.h"
-#include "rtc_base/logging.h"
-
-namespace webrtc {
-namespace acm2 {
-
-namespace {
-
-// Check if the given codec is a valid to be registered as send codec.
-int IsValidSendCodec(const CodecInst& send_codec) {
-  if ((send_codec.channels != 1) && (send_codec.channels != 2)) {
-    RTC_LOG(LS_ERROR) << "Wrong number of channels (" << send_codec.channels
-                      << "), only mono and stereo are supported)";
-    return -1;
-  }
-
-  auto maybe_codec_id = RentACodec::CodecIdByInst(send_codec);
-  if (!maybe_codec_id) {
-    RTC_LOG(LS_ERROR) << "Invalid codec setting for the send codec.";
-    return -1;
-  }
-
-  // Telephone-event cannot be a send codec.
-  if (absl::EqualsIgnoreCase(send_codec.plname, "telephone-event")) {
-    RTC_LOG(LS_ERROR) << "telephone-event cannot be a send codec";
-    return -1;
-  }
-
-  if (!RentACodec::IsSupportedNumChannels(*maybe_codec_id, send_codec.channels)
-           .value_or(false)) {
-    RTC_LOG(LS_ERROR) << send_codec.channels
-                      << " number of channels not supported for "
-                      << send_codec.plname << ".";
-    return -1;
-  }
-  return RentACodec::CodecIndexFromId(*maybe_codec_id).value_or(-1);
-}
-
-bool IsOpus(const CodecInst& codec) {
-  return
-#ifdef WEBRTC_CODEC_OPUS
-      absl::EqualsIgnoreCase(codec.plname, "opus") ||
-#endif
-      false;
-}
-
-}  // namespace
-
-CodecManager::CodecManager() {
-  thread_checker_.DetachFromThread();
-}
-
-CodecManager::~CodecManager() = default;
-
-bool CodecManager::RegisterEncoder(const CodecInst& send_codec) {
-  RTC_DCHECK(thread_checker_.CalledOnValidThread());
-  int codec_id = IsValidSendCodec(send_codec);
-
-  // Check for reported errors from function IsValidSendCodec().
-  if (codec_id < 0) {
-    return false;
-  }
-
-  switch (RentACodec::RegisterRedPayloadType(
-      &codec_stack_params_.red_payload_types, send_codec)) {
-    case RentACodec::RegistrationResult::kOk:
-      return true;
-    case RentACodec::RegistrationResult::kBadFreq:
-      RTC_LOG(LS_ERROR)
-          << "RegisterSendCodec() failed, invalid frequency for RED"
-             " registration";
-      return false;
-    case RentACodec::RegistrationResult::kSkip:
-      break;
-  }
-  switch (RentACodec::RegisterCngPayloadType(
-      &codec_stack_params_.cng_payload_types, send_codec)) {
-    case RentACodec::RegistrationResult::kOk:
-      return true;
-    case RentACodec::RegistrationResult::kBadFreq:
-      RTC_LOG(LS_ERROR)
-          << "RegisterSendCodec() failed, invalid frequency for CNG"
-             " registration";
-      return false;
-    case RentACodec::RegistrationResult::kSkip:
-      break;
-  }
-
-  if (IsOpus(send_codec)) {
-    // VAD/DTX not supported.
-    codec_stack_params_.use_cng = false;
-  }
-
-  send_codec_inst_ = send_codec;
-  recreate_encoder_ = true;  // Caller must recreate it.
-  return true;
-}
-
-CodecInst CodecManager::ForgeCodecInst(
-    const AudioEncoder* external_speech_encoder) {
-  CodecInst ci;
-  ci.channels = external_speech_encoder->NumChannels();
-  ci.plfreq = external_speech_encoder->SampleRateHz();
-  ci.pacsize = rtc::CheckedDivExact(
-      static_cast<int>(external_speech_encoder->Max10MsFramesInAPacket() *
-                       ci.plfreq),
-      100);
-  ci.pltype = -1;  // Not valid.
-  ci.rate = -1;    // Not valid.
-  static const char kName[] = "external";
-  memcpy(ci.plname, kName, sizeof(kName));
-  return ci;
-}
-
-bool CodecManager::SetCopyRed(bool enable) {
-  if (enable && codec_stack_params_.use_codec_fec) {
-    RTC_LOG(LS_WARNING) << "Codec internal FEC and RED cannot be co-enabled.";
-    return false;
-  }
-  if (enable && send_codec_inst_ &&
-      codec_stack_params_.red_payload_types.count(send_codec_inst_->plfreq) <
-          1) {
-    RTC_LOG(LS_WARNING) << "Cannot enable RED at " << send_codec_inst_->plfreq
-                        << " Hz.";
-    return false;
-  }
-  codec_stack_params_.use_red = enable;
-  return true;
-}
-
-bool CodecManager::SetVAD(bool enable, ACMVADMode mode) {
-  // Sanity check of the mode.
-  RTC_DCHECK(mode == VADNormal || mode == VADLowBitrate || mode == VADAggr ||
-             mode == VADVeryAggr);
-
-  // Check that the send codec is mono. We don't support VAD/DTX for stereo
-  // sending.
-  const bool stereo_send =
-      codec_stack_params_.speech_encoder
-          ? (codec_stack_params_.speech_encoder->NumChannels() != 1)
-          : false;
-  if (enable && stereo_send) {
-    RTC_LOG(LS_ERROR) << "VAD/DTX not supported for stereo sending";
-    return false;
-  }
-
-  // TODO(kwiberg): This doesn't protect Opus when injected as an external
-  // encoder.
-  if (send_codec_inst_ && IsOpus(*send_codec_inst_)) {
-    // VAD/DTX not supported, but don't fail.
-    enable = false;
-  }
-
-  codec_stack_params_.use_cng = enable;
-  codec_stack_params_.vad_mode = mode;
-  return true;
-}
-
-bool CodecManager::SetCodecFEC(bool enable_codec_fec) {
-  if (enable_codec_fec && codec_stack_params_.use_red) {
-    RTC_LOG(LS_WARNING) << "Codec internal FEC and RED cannot be co-enabled.";
-    return false;
-  }
-
-  codec_stack_params_.use_codec_fec = enable_codec_fec;
-  return true;
-}
-
-bool CodecManager::MakeEncoder(RentACodec* rac, AudioCodingModule* acm) {
-  RTC_DCHECK(rac);
-  RTC_DCHECK(acm);
-
-  if (!recreate_encoder_) {
-    bool error = false;
-    // Try to re-use the speech encoder we've given to the ACM.
-    acm->ModifyEncoder([&](std::unique_ptr<AudioEncoder>* encoder) {
-      if (!*encoder) {
-        // There is no existing encoder.
-        recreate_encoder_ = true;
-        return;
-      }
-
-      // Extract the speech encoder from the ACM.
-      std::unique_ptr<AudioEncoder> enc = std::move(*encoder);
-      while (true) {
-        auto sub_enc = enc->ReclaimContainedEncoders();
-        if (sub_enc.empty()) {
-          break;
-        }
-        RTC_CHECK_EQ(1, sub_enc.size());
-
-        // Replace enc with its sub encoder. We need to put the sub encoder in
-        // a temporary first, since otherwise the old value of enc would be
-        // destroyed before the new value got assigned, which would be bad
-        // since the new value is a part of the old value.
-        auto tmp_enc = std::move(sub_enc[0]);
-        enc = std::move(tmp_enc);
-      }
-
-      // Wrap it in a new encoder stack and put it back.
-      codec_stack_params_.speech_encoder = std::move(enc);
-      *encoder = rac->RentEncoderStack(&codec_stack_params_);
-      if (!*encoder) {
-        error = true;
-      }
-    });
-    if (error) {
-      return false;
-    }
-    if (!recreate_encoder_) {
-      return true;
-    }
-  }
-
-  if (!send_codec_inst_) {
-    // We don't have the information we need to create a new speech encoder.
-    // (This is not an error.)
-    return true;
-  }
-
-  codec_stack_params_.speech_encoder = rac->RentEncoder(*send_codec_inst_);
-  auto stack = rac->RentEncoderStack(&codec_stack_params_);
-  if (!stack) {
-    return false;
-  }
-  acm->SetEncoder(std::move(stack));
-  recreate_encoder_ = false;
-  return true;
-}
-
-}  // namespace acm2
-}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/codec_manager.h b/modules/audio_coding/acm2/codec_manager.h
deleted file mode 100644
index 22dbf4e..0000000
--- a/modules/audio_coding/acm2/codec_manager.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_AUDIO_CODING_ACM2_CODEC_MANAGER_H_
-#define MODULES_AUDIO_CODING_ACM2_CODEC_MANAGER_H_
-
-#include "absl/types/optional.h"
-#include "common_types.h"  // NOLINT(build/include)
-#include "modules/audio_coding/acm2/rent_a_codec.h"
-#include "modules/audio_coding/include/audio_coding_module.h"
-#include "modules/audio_coding/include/audio_coding_module_typedefs.h"
-#include "rtc_base/constructormagic.h"
-#include "rtc_base/thread_checker.h"
-
-namespace webrtc {
-
-class AudioEncoder;
-
-namespace acm2 {
-
-class CodecManager final {
- public:
-  CodecManager();
-  ~CodecManager();
-
-  // Parses the given specification. On success, returns true and updates the
-  // stored CodecInst and stack parameters; on error, returns false.
-  bool RegisterEncoder(const CodecInst& send_codec);
-
-  static CodecInst ForgeCodecInst(const AudioEncoder* external_speech_encoder);
-
-  const CodecInst* GetCodecInst() const {
-    return send_codec_inst_ ? &*send_codec_inst_ : nullptr;
-  }
-
-  void UnsetCodecInst() { send_codec_inst_ = absl::nullopt; }
-
-  const RentACodec::StackParameters* GetStackParams() const {
-    return &codec_stack_params_;
-  }
-  RentACodec::StackParameters* GetStackParams() { return &codec_stack_params_; }
-
-  bool SetCopyRed(bool enable);
-
-  bool SetVAD(bool enable, ACMVADMode mode);
-
-  bool SetCodecFEC(bool enable_codec_fec);
-
-  // Uses the provided Rent-A-Codec to create a new encoder stack, if we have a
-  // complete specification; if so, it is then passed to set_encoder. On error,
-  // returns false.
-  bool MakeEncoder(RentACodec* rac, AudioCodingModule* acm);
-
- private:
-  rtc::ThreadChecker thread_checker_;
-  absl::optional<CodecInst> send_codec_inst_;
-  RentACodec::StackParameters codec_stack_params_;
-  bool recreate_encoder_ = true;  // Need to recreate encoder?
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(CodecManager);
-};
-
-}  // namespace acm2
-}  // namespace webrtc
-#endif  // MODULES_AUDIO_CODING_ACM2_CODEC_MANAGER_H_
diff --git a/modules/audio_coding/acm2/codec_manager_unittest.cc b/modules/audio_coding/acm2/codec_manager_unittest.cc
deleted file mode 100644
index 6a5ea5f..0000000
--- a/modules/audio_coding/acm2/codec_manager_unittest.cc
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <memory>
-
-#include "modules/audio_coding/acm2/codec_manager.h"
-#include "modules/audio_coding/acm2/rent_a_codec.h"
-#include "test/gtest.h"
-#include "test/mock_audio_encoder.h"
-
-namespace webrtc {
-namespace acm2 {
-
-using ::testing::Return;
-
-namespace {
-
-// Create a MockAudioEncoder with some reasonable default behavior.
-std::unique_ptr<MockAudioEncoder> CreateMockEncoder() {
-  auto enc = std::unique_ptr<MockAudioEncoder>(new MockAudioEncoder);
-  EXPECT_CALL(*enc, SampleRateHz()).WillRepeatedly(Return(8000));
-  EXPECT_CALL(*enc, NumChannels()).WillRepeatedly(Return(1));
-  EXPECT_CALL(*enc, Max10MsFramesInAPacket()).WillRepeatedly(Return(1));
-  return enc;
-}
-
-}  // namespace
-
-TEST(CodecManagerTest, ExternalEncoderFec) {
-  auto enc0 = CreateMockEncoder();
-  auto enc1 = CreateMockEncoder();
-  auto enc2 = CreateMockEncoder();
-  {
-    ::testing::InSequence s;
-    EXPECT_CALL(*enc0, SetFec(false)).WillOnce(Return(true));
-    EXPECT_CALL(*enc1, SetFec(true)).WillOnce(Return(true));
-    EXPECT_CALL(*enc2, SetFec(true)).WillOnce(Return(false));
-  }
-
-  CodecManager cm;
-  RentACodec rac;
-
-  // use_codec_fec starts out false.
-  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
-  cm.GetStackParams()->speech_encoder = std::move(enc0);
-  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
-  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
-
-  // Set it to true.
-  EXPECT_EQ(true, cm.SetCodecFEC(true));
-  EXPECT_TRUE(cm.GetStackParams()->use_codec_fec);
-  cm.GetStackParams()->speech_encoder = std::move(enc1);
-  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
-  EXPECT_TRUE(cm.GetStackParams()->use_codec_fec);
-
-  // Switch to a codec that doesn't support it.
-  cm.GetStackParams()->speech_encoder = std::move(enc2);
-  EXPECT_TRUE(rac.RentEncoderStack(cm.GetStackParams()));
-  EXPECT_FALSE(cm.GetStackParams()->use_codec_fec);
-}
-
-}  // namespace acm2
-}  // namespace webrtc
diff --git a/modules/audio_coding/acm2/rent_a_codec.cc b/modules/audio_coding/acm2/rent_a_codec.cc
index 7601519..bfddc42b 100644
--- a/modules/audio_coding/acm2/rent_a_codec.cc
+++ b/modules/audio_coding/acm2/rent_a_codec.cc
@@ -13,35 +13,9 @@
 #include <memory>
 #include <utility>
 
-#include "absl/strings/match.h"
-#include "modules/audio_coding/codecs/cng/audio_encoder_cng.h"
-#include "modules/audio_coding/codecs/g711/audio_encoder_pcm.h"
-#include "modules/audio_coding/codecs/g722/audio_encoder_g722.h"
 #include "rtc_base/logging.h"
-#ifdef WEBRTC_CODEC_ILBC
-#include "modules/audio_coding/codecs/ilbc/audio_encoder_ilbc.h"  // nogncheck
-#endif
-#ifdef WEBRTC_CODEC_ISACFX
-#include "modules/audio_coding/codecs/isac/fix/include/audio_decoder_isacfix.h"  // nogncheck
-#include "modules/audio_coding/codecs/isac/fix/include/audio_encoder_isacfix.h"  // nogncheck
-#endif
-#ifdef WEBRTC_CODEC_ISAC
-#include "modules/audio_coding/codecs/isac/main/include/audio_decoder_isac.h"  // nogncheck
-#include "modules/audio_coding/codecs/isac/main/include/audio_encoder_isac.h"  // nogncheck
-#endif
-#ifdef WEBRTC_CODEC_OPUS
-#include "modules/audio_coding/codecs/opus/audio_encoder_opus.h"
-#endif
-#include "modules/audio_coding/codecs/pcm16b/audio_encoder_pcm16b.h"
-#ifdef WEBRTC_CODEC_RED
-#include "modules/audio_coding/codecs/red/audio_encoder_copy_red.h"  // nogncheck
-#endif
 #include "modules/audio_coding/acm2/acm_codec_database.h"
 
-#if defined(WEBRTC_CODEC_ISACFX) || defined(WEBRTC_CODEC_ISAC)
-#include "modules/audio_coding/codecs/isac/locked_bandwidth_info.h"
-#endif
-
 namespace webrtc {
 namespace acm2 {
 
@@ -55,7 +29,8 @@
 
 absl::optional<CodecInst> RentACodec::CodecInstById(CodecId codec_id) {
   absl::optional<int> mi = CodecIndexFromId(codec_id);
-  return mi ? absl::optional<CodecInst>(Database()[*mi]) : absl::nullopt;
+  return mi ? absl::optional<CodecInst>(ACMCodecDB::database_[*mi])
+            : absl::nullopt;
 }
 
 absl::optional<RentACodec::CodecId> RentACodec::CodecIdByInst(
@@ -81,20 +56,6 @@
   return ci;
 }
 
-absl::optional<bool> RentACodec::IsSupportedNumChannels(CodecId codec_id,
-                                                        size_t num_channels) {
-  auto i = CodecIndexFromId(codec_id);
-  return i ? absl::optional<bool>(
-                 ACMCodecDB::codec_settings_[*i].channel_support >=
-                 num_channels)
-           : absl::nullopt;
-}
-
-rtc::ArrayView<const CodecInst> RentACodec::Database() {
-  return rtc::ArrayView<const CodecInst>(ACMCodecDB::database_,
-                                         NumberOfCodecs());
-}
-
 absl::optional<NetEqDecoder> RentACodec::NetEqDecoderFromCodecId(
     CodecId codec_id,
     size_t num_channels) {
@@ -107,200 +68,5 @@
              : ned;
 }
 
-RentACodec::RegistrationResult RentACodec::RegisterCngPayloadType(
-    std::map<int, int>* pt_map,
-    const CodecInst& codec_inst) {
-  if (!absl::EqualsIgnoreCase(codec_inst.plname, "CN"))
-    return RegistrationResult::kSkip;
-  switch (codec_inst.plfreq) {
-    case 8000:
-    case 16000:
-    case 32000:
-    case 48000:
-      (*pt_map)[codec_inst.plfreq] = codec_inst.pltype;
-      return RegistrationResult::kOk;
-    default:
-      return RegistrationResult::kBadFreq;
-  }
-}
-
-RentACodec::RegistrationResult RentACodec::RegisterRedPayloadType(
-    std::map<int, int>* pt_map,
-    const CodecInst& codec_inst) {
-  if (!absl::EqualsIgnoreCase(codec_inst.plname, "RED"))
-    return RegistrationResult::kSkip;
-  switch (codec_inst.plfreq) {
-    case 8000:
-      (*pt_map)[codec_inst.plfreq] = codec_inst.pltype;
-      return RegistrationResult::kOk;
-    default:
-      return RegistrationResult::kBadFreq;
-  }
-}
-
-namespace {
-
-// Returns a new speech encoder, or null on error.
-// TODO(kwiberg): Don't handle errors here (bug 5033)
-std::unique_ptr<AudioEncoder> CreateEncoder(
-    const CodecInst& speech_inst,
-    const rtc::scoped_refptr<LockedIsacBandwidthInfo>& bwinfo) {
-#if defined(WEBRTC_CODEC_ISACFX)
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "isac"))
-    return std::unique_ptr<AudioEncoder>(
-        new AudioEncoderIsacFixImpl(speech_inst, bwinfo));
-#endif
-#if defined(WEBRTC_CODEC_ISAC)
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "isac"))
-    return std::unique_ptr<AudioEncoder>(
-        new AudioEncoderIsacFloatImpl(speech_inst, bwinfo));
-#endif
-#ifdef WEBRTC_CODEC_OPUS
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "opus"))
-    return std::unique_ptr<AudioEncoder>(new AudioEncoderOpusImpl(speech_inst));
-#endif
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "pcmu"))
-    return std::unique_ptr<AudioEncoder>(new AudioEncoderPcmU(speech_inst));
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "pcma"))
-    return std::unique_ptr<AudioEncoder>(new AudioEncoderPcmA(speech_inst));
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "l16"))
-    return std::unique_ptr<AudioEncoder>(new AudioEncoderPcm16B(speech_inst));
-#ifdef WEBRTC_CODEC_ILBC
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "ilbc"))
-    return std::unique_ptr<AudioEncoder>(new AudioEncoderIlbcImpl(speech_inst));
-#endif
-  if (absl::EqualsIgnoreCase(speech_inst.plname, "g722"))
-    return std::unique_ptr<AudioEncoder>(new AudioEncoderG722Impl(speech_inst));
-  RTC_LOG_F(LS_ERROR) << "Could not create encoder of type "
-                      << speech_inst.plname;
-  return std::unique_ptr<AudioEncoder>();
-}
-
-std::unique_ptr<AudioEncoder> CreateRedEncoder(
-    std::unique_ptr<AudioEncoder> encoder,
-    int red_payload_type) {
-#ifdef WEBRTC_CODEC_RED
-  AudioEncoderCopyRed::Config config;
-  config.payload_type = red_payload_type;
-  config.speech_encoder = std::move(encoder);
-  return std::unique_ptr<AudioEncoder>(
-      new AudioEncoderCopyRed(std::move(config)));
-#else
-  return std::unique_ptr<AudioEncoder>();
-#endif
-}
-
-std::unique_ptr<AudioEncoder> CreateCngEncoder(
-    std::unique_ptr<AudioEncoder> encoder,
-    int payload_type,
-    ACMVADMode vad_mode) {
-  AudioEncoderCngConfig config;
-  config.num_channels = encoder->NumChannels();
-  config.payload_type = payload_type;
-  config.speech_encoder = std::move(encoder);
-  switch (vad_mode) {
-    case VADNormal:
-      config.vad_mode = Vad::kVadNormal;
-      break;
-    case VADLowBitrate:
-      config.vad_mode = Vad::kVadLowBitrate;
-      break;
-    case VADAggr:
-      config.vad_mode = Vad::kVadAggressive;
-      break;
-    case VADVeryAggr:
-      config.vad_mode = Vad::kVadVeryAggressive;
-      break;
-    default:
-      FATAL();
-  }
-  return CreateComfortNoiseEncoder(std::move(config));
-}
-
-std::unique_ptr<AudioDecoder> CreateIsacDecoder(
-    int sample_rate_hz,
-    const rtc::scoped_refptr<LockedIsacBandwidthInfo>& bwinfo) {
-#if defined(WEBRTC_CODEC_ISACFX)
-  return std::unique_ptr<AudioDecoder>(
-      new AudioDecoderIsacFixImpl(sample_rate_hz, bwinfo));
-#elif defined(WEBRTC_CODEC_ISAC)
-  return std::unique_ptr<AudioDecoder>(
-      new AudioDecoderIsacFloatImpl(sample_rate_hz, bwinfo));
-#else
-  FATAL() << "iSAC is not supported.";
-  return std::unique_ptr<AudioDecoder>();
-#endif
-}
-
-}  // namespace
-
-RentACodec::RentACodec() {
-#if defined(WEBRTC_CODEC_ISACFX) || defined(WEBRTC_CODEC_ISAC)
-  isac_bandwidth_info_ = new LockedIsacBandwidthInfo;
-#endif
-}
-RentACodec::~RentACodec() = default;
-
-std::unique_ptr<AudioEncoder> RentACodec::RentEncoder(
-    const CodecInst& codec_inst) {
-  return CreateEncoder(codec_inst, isac_bandwidth_info_);
-}
-
-RentACodec::StackParameters::StackParameters() {
-  // Register the default payload types for RED and CNG.
-  for (const CodecInst& ci : RentACodec::Database()) {
-    RentACodec::RegisterCngPayloadType(&cng_payload_types, ci);
-    RentACodec::RegisterRedPayloadType(&red_payload_types, ci);
-  }
-}
-
-RentACodec::StackParameters::~StackParameters() = default;
-
-std::unique_ptr<AudioEncoder> RentACodec::RentEncoderStack(
-    StackParameters* param) {
-  if (!param->speech_encoder)
-    return nullptr;
-
-  if (param->use_codec_fec) {
-    // Switch FEC on. On failure, remember that FEC is off.
-    if (!param->speech_encoder->SetFec(true))
-      param->use_codec_fec = false;
-  } else {
-    // Switch FEC off. This shouldn't fail.
-    const bool success = param->speech_encoder->SetFec(false);
-    RTC_DCHECK(success);
-  }
-
-  auto pt = [&param](const std::map<int, int>& m) {
-    auto it = m.find(param->speech_encoder->SampleRateHz());
-    return it == m.end() ? absl::nullopt : absl::optional<int>(it->second);
-  };
-  auto cng_pt = pt(param->cng_payload_types);
-  param->use_cng =
-      param->use_cng && cng_pt && param->speech_encoder->NumChannels() == 1;
-  auto red_pt = pt(param->red_payload_types);
-  param->use_red = param->use_red && red_pt;
-
-  if (param->use_cng || param->use_red) {
-    // The RED and CNG encoders need to be in sync with the speech encoder, so
-    // reset the latter to ensure its buffer is empty.
-    param->speech_encoder->Reset();
-  }
-  std::unique_ptr<AudioEncoder> encoder_stack =
-      std::move(param->speech_encoder);
-  if (param->use_red) {
-    encoder_stack = CreateRedEncoder(std::move(encoder_stack), *red_pt);
-  }
-  if (param->use_cng) {
-    encoder_stack =
-        CreateCngEncoder(std::move(encoder_stack), *cng_pt, param->vad_mode);
-  }
-  return encoder_stack;
-}
-
-std::unique_ptr<AudioDecoder> RentACodec::RentIsacDecoder(int sample_rate_hz) {
-  return CreateIsacDecoder(sample_rate_hz, isac_bandwidth_info_);
-}
-
 }  // namespace acm2
 }  // namespace webrtc
diff --git a/modules/audio_coding/acm2/rent_a_codec.h b/modules/audio_coding/acm2/rent_a_codec.h
index b0ad382..2cf1c6e 100644
--- a/modules/audio_coding/acm2/rent_a_codec.h
+++ b/modules/audio_coding/acm2/rent_a_codec.h
@@ -31,8 +31,7 @@
 
 namespace acm2 {
 
-class RentACodec {
- public:
+struct RentACodec {
   enum class CodecId {
 #if defined(WEBRTC_CODEC_ISAC) || defined(WEBRTC_CODEC_ISACFX)
     kISAC,
@@ -133,64 +132,9 @@
     return payload_type >= 0 && payload_type <= 127;
   }
 
-  static rtc::ArrayView<const CodecInst> Database();
-
-  static absl::optional<bool> IsSupportedNumChannels(CodecId codec_id,
-                                                     size_t num_channels);
-
   static absl::optional<NetEqDecoder> NetEqDecoderFromCodecId(
       CodecId codec_id,
       size_t num_channels);
-
-  // Parse codec_inst and extract payload types. If the given CodecInst was for
-  // the wrong sort of codec, return kSkip; otherwise, if the rate was illegal,
-  // return kBadFreq; otherwise, update the given RTP timestamp rate (Hz) ->
-  // payload type map and return kOk.
-  enum class RegistrationResult { kOk, kSkip, kBadFreq };
-  static RegistrationResult RegisterCngPayloadType(std::map<int, int>* pt_map,
-                                                   const CodecInst& codec_inst);
-  static RegistrationResult RegisterRedPayloadType(std::map<int, int>* pt_map,
-                                                   const CodecInst& codec_inst);
-
-  RentACodec();
-  ~RentACodec();
-
-  // Creates and returns an audio encoder built to the given specification.
-  // Returns null in case of error.
-  std::unique_ptr<AudioEncoder> RentEncoder(const CodecInst& codec_inst);
-
-  struct StackParameters {
-    StackParameters();
-    ~StackParameters();
-
-    std::unique_ptr<AudioEncoder> speech_encoder;
-
-    bool use_codec_fec = false;
-    bool use_red = false;
-    bool use_cng = false;
-    ACMVADMode vad_mode = VADNormal;
-
-    // Maps from RTP timestamp rate (in Hz) to payload type.
-    std::map<int, int> cng_payload_types;
-    std::map<int, int> red_payload_types;
-  };
-
-  // Creates and returns an audio encoder stack constructed to the given
-  // specification. If the specification isn't compatible with the encoder, it
-  // will be changed to match (things will be switched off). The speech encoder
-  // will be stolen. If the specification isn't complete, returns nullptr.
-  std::unique_ptr<AudioEncoder> RentEncoderStack(StackParameters* param);
-
-  // Creates and returns an iSAC decoder.
-  std::unique_ptr<AudioDecoder> RentIsacDecoder(int sample_rate_hz);
-
- private:
-  std::unique_ptr<AudioEncoder> speech_encoder_;
-  std::unique_ptr<AudioEncoder> cng_encoder_;
-  std::unique_ptr<AudioEncoder> red_encoder_;
-  rtc::scoped_refptr<LockedIsacBandwidthInfo> isac_bandwidth_info_;
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(RentACodec);
 };
 
 }  // namespace acm2
diff --git a/modules/audio_coding/acm2/rent_a_codec_unittest.cc b/modules/audio_coding/acm2/rent_a_codec_unittest.cc
deleted file mode 100644
index fd3329c..0000000
--- a/modules/audio_coding/acm2/rent_a_codec_unittest.cc
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <memory>
-
-#include "common_types.h"
-#include "modules/audio_coding/acm2/rent_a_codec.h"
-#include "rtc_base/arraysize.h"
-#include "test/gtest.h"
-#include "test/mock_audio_encoder.h"
-
-namespace webrtc {
-namespace acm2 {
-
-using ::testing::Return;
-
-namespace {
-
-const int kDataLengthSamples = 80;
-const int kPacketSizeSamples = 2 * kDataLengthSamples;
-const int16_t kZeroData[kDataLengthSamples] = {0};
-const CodecInst kDefaultCodecInst = {0, "pcmu", 8000, kPacketSizeSamples,
-                                     1, 64000};
-const int kCngPt = 13;
-
-class Marker final {
- public:
-  MOCK_METHOD1(Mark, void(std::string desc));
-};
-
-}  // namespace
-
-class RentACodecTestF : public ::testing::Test {
- protected:
-  void CreateCodec() {
-    auto speech_encoder = rent_a_codec_.RentEncoder(kDefaultCodecInst);
-    ASSERT_TRUE(speech_encoder);
-    RentACodec::StackParameters param;
-    param.use_cng = true;
-    param.speech_encoder = std::move(speech_encoder);
-    encoder_ = rent_a_codec_.RentEncoderStack(&param);
-  }
-
-  void EncodeAndVerify(size_t expected_out_length,
-                       uint32_t expected_timestamp,
-                       int expected_payload_type,
-                       int expected_send_even_if_empty) {
-    rtc::Buffer out;
-    AudioEncoder::EncodedInfo encoded_info;
-    encoded_info = encoder_->Encode(timestamp_, kZeroData, &out);
-    timestamp_ += kDataLengthSamples;
-    EXPECT_TRUE(encoded_info.redundant.empty());
-    EXPECT_EQ(expected_out_length, encoded_info.encoded_bytes);
-    EXPECT_EQ(expected_timestamp, encoded_info.encoded_timestamp);
-    if (expected_payload_type >= 0)
-      EXPECT_EQ(expected_payload_type, encoded_info.payload_type);
-    if (expected_send_even_if_empty >= 0)
-      EXPECT_EQ(static_cast<bool>(expected_send_even_if_empty),
-                encoded_info.send_even_if_empty);
-  }
-
-  RentACodec rent_a_codec_;
-  std::unique_ptr<AudioEncoder> encoder_;
-  uint32_t timestamp_ = 0;
-};
-
-// This test verifies that CNG frames are delivered as expected. Since the frame
-// size is set to 20 ms, we expect the first encode call to produce no output
-// (which is signaled as 0 bytes output of type kNoEncoding). The next encode
-// call should produce one SID frame of 9 bytes. The third call should not
-// result in any output (just like the first one). The fourth and final encode
-// call should produce an "empty frame", which is like no output, but with
-// AudioEncoder::EncodedInfo::send_even_if_empty set to true. (The reason to
-// produce an empty frame is to drive sending of DTMF packets in the RTP/RTCP
-// module.)
-TEST_F(RentACodecTestF, VerifyCngFrames) {
-  CreateCodec();
-  uint32_t expected_timestamp = timestamp_;
-  // Verify no frame.
-  {
-    SCOPED_TRACE("First encoding");
-    EncodeAndVerify(0, expected_timestamp, -1, -1);
-  }
-
-  // Verify SID frame delivered.
-  {
-    SCOPED_TRACE("Second encoding");
-    EncodeAndVerify(9, expected_timestamp, kCngPt, 1);
-  }
-
-  // Verify no frame.
-  {
-    SCOPED_TRACE("Third encoding");
-    EncodeAndVerify(0, expected_timestamp, -1, -1);
-  }
-
-  // Verify NoEncoding.
-  expected_timestamp += 2 * kDataLengthSamples;
-  {
-    SCOPED_TRACE("Fourth encoding");
-    EncodeAndVerify(0, expected_timestamp, kCngPt, 1);
-  }
-}
-
-TEST(RentACodecTest, ExternalEncoder) {
-  const int kSampleRateHz = 8000;
-  auto* external_encoder = new MockAudioEncoder;
-  EXPECT_CALL(*external_encoder, SampleRateHz())
-      .WillRepeatedly(Return(kSampleRateHz));
-  EXPECT_CALL(*external_encoder, NumChannels()).WillRepeatedly(Return(1));
-  EXPECT_CALL(*external_encoder, SetFec(false)).WillRepeatedly(Return(true));
-
-  RentACodec rac;
-  RentACodec::StackParameters param;
-  param.speech_encoder = std::unique_ptr<AudioEncoder>(external_encoder);
-  std::unique_ptr<AudioEncoder> encoder_stack = rac.RentEncoderStack(&param);
-  EXPECT_EQ(external_encoder, encoder_stack.get());
-  const int kPacketSizeSamples = kSampleRateHz / 100;
-  int16_t audio[kPacketSizeSamples] = {0};
-  rtc::Buffer encoded;
-  AudioEncoder::EncodedInfo info;
-
-  Marker marker;
-  {
-    ::testing::InSequence s;
-    info.encoded_timestamp = 0;
-    EXPECT_CALL(*external_encoder,
-                EncodeImpl(0, rtc::ArrayView<const int16_t>(audio), &encoded))
-        .WillOnce(Return(info));
-    EXPECT_CALL(marker, Mark("A"));
-    EXPECT_CALL(marker, Mark("B"));
-    EXPECT_CALL(marker, Mark("C"));
-  }
-
-  info = encoder_stack->Encode(0, audio, &encoded);
-  EXPECT_EQ(0u, info.encoded_timestamp);
-  marker.Mark("A");
-
-  // Change to internal encoder.
-  CodecInst codec_inst = kDefaultCodecInst;
-  codec_inst.pacsize = kPacketSizeSamples;
-  param.speech_encoder = rac.RentEncoder(codec_inst);
-  ASSERT_TRUE(param.speech_encoder);
-  AudioEncoder* enc = param.speech_encoder.get();
-  std::unique_ptr<AudioEncoder> stack = rac.RentEncoderStack(&param);
-  EXPECT_EQ(enc, stack.get());
-
-  // Don't expect any more calls to the external encoder.
-  info = stack->Encode(1, audio, &encoded);
-  marker.Mark("B");
-  encoder_stack.reset();
-  marker.Mark("C");
-}
-
-// Verify that the speech encoder's Reset method is called when CNG or RED
-// (or both) are switched on, but not when they're switched off.
-void TestCngAndRedResetSpeechEncoder(bool use_cng, bool use_red) {
-  auto make_enc = [] {
-    auto speech_encoder =
-        std::unique_ptr<MockAudioEncoder>(new MockAudioEncoder);
-    EXPECT_CALL(*speech_encoder, NumChannels()).WillRepeatedly(Return(1));
-    EXPECT_CALL(*speech_encoder, Max10MsFramesInAPacket())
-        .WillRepeatedly(Return(2));
-    EXPECT_CALL(*speech_encoder, SampleRateHz()).WillRepeatedly(Return(8000));
-    EXPECT_CALL(*speech_encoder, SetFec(false)).WillRepeatedly(Return(true));
-    return speech_encoder;
-  };
-  auto speech_encoder1 = make_enc();
-  auto speech_encoder2 = make_enc();
-  Marker marker;
-  {
-    ::testing::InSequence s;
-    EXPECT_CALL(marker, Mark("disabled"));
-    EXPECT_CALL(marker, Mark("enabled"));
-    if (use_cng || use_red)
-      EXPECT_CALL(*speech_encoder2, Reset());
-  }
-
-  RentACodec::StackParameters param1, param2;
-  param1.speech_encoder = std::move(speech_encoder1);
-  param2.speech_encoder = std::move(speech_encoder2);
-  param2.use_cng = use_cng;
-  param2.use_red = use_red;
-  marker.Mark("disabled");
-  RentACodec rac;
-  rac.RentEncoderStack(&param1);
-  marker.Mark("enabled");
-  rac.RentEncoderStack(&param2);
-}
-
-TEST(RentACodecTest, CngResetsSpeechEncoder) {
-  TestCngAndRedResetSpeechEncoder(true, false);
-}
-
-TEST(RentACodecTest, RedResetsSpeechEncoder) {
-  TestCngAndRedResetSpeechEncoder(false, true);
-}
-
-TEST(RentACodecTest, CngAndRedResetsSpeechEncoder) {
-  TestCngAndRedResetSpeechEncoder(true, true);
-}
-
-TEST(RentACodecTest, NoCngAndRedNoSpeechEncoderReset) {
-  TestCngAndRedResetSpeechEncoder(false, false);
-}
-
-TEST(RentACodecTest, RentEncoderError) {
-  const CodecInst codec_inst = {
-      0, "Robert'); DROP TABLE Students;", 8000, 160, 1, 64000};
-  RentACodec rent_a_codec;
-  EXPECT_FALSE(rent_a_codec.RentEncoder(codec_inst));
-}
-
-TEST(RentACodecTest, RentEncoderStackWithoutSpeechEncoder) {
-  RentACodec::StackParameters sp;
-  EXPECT_EQ(nullptr, sp.speech_encoder);
-  EXPECT_EQ(nullptr, RentACodec().RentEncoderStack(&sp));
-}
-
-}  // namespace acm2
-}  // namespace webrtc
diff --git a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc
index 85084c8..9e47a06 100644
--- a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc
+++ b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.cc
@@ -46,17 +46,7 @@
                                    kEventLogMinBitrateChangeBps,
                                    kEventLogMinBitrateChangeFraction,
                                    kEventLogMinPacketLossChangeFraction)
-              : nullptr),
-      enable_bitrate_adaptation_(
-          webrtc::field_trial::IsEnabled("WebRTC-Audio-BitrateAdaptation")),
-      enable_dtx_adaptation_(
-          webrtc::field_trial::IsEnabled("WebRTC-Audio-DtxAdaptation")),
-      enable_fec_adaptation_(
-          webrtc::field_trial::IsEnabled("WebRTC-Audio-FecAdaptation")),
-      enable_channel_adaptation_(
-          webrtc::field_trial::IsEnabled("WebRTC-Audio-ChannelAdaptation")),
-      enable_frame_length_adaptation_(webrtc::field_trial::IsEnabled(
-          "WebRTC-Audio-FrameLengthAdaptation")) {
+              : nullptr) {
   RTC_DCHECK(controller_manager_);
 }
 
@@ -157,24 +147,6 @@
   }
   prev_config_ = config;
 
-  // Prevent certain controllers from taking action (determined by field trials)
-  if (!enable_bitrate_adaptation_ && config.bitrate_bps) {
-    config.bitrate_bps.reset();
-  }
-  if (!enable_dtx_adaptation_ && config.enable_dtx) {
-    config.enable_dtx.reset();
-  }
-  if (!enable_fec_adaptation_ && config.enable_fec) {
-    config.enable_fec.reset();
-    config.uplink_packet_loss_fraction.reset();
-  }
-  if (!enable_frame_length_adaptation_ && config.frame_length_ms) {
-    config.frame_length_ms.reset();
-  }
-  if (!enable_channel_adaptation_ && config.num_channels) {
-    config.num_channels.reset();
-  }
-
   if (debug_dump_writer_)
     debug_dump_writer_->DumpEncoderRuntimeConfig(config, rtc::TimeMillis());
 
diff --git a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h
index d3ecce0..4c1c19b 100644
--- a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h
+++ b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl.h
@@ -83,12 +83,6 @@
 
   ANAStats stats_;
 
-  const bool enable_bitrate_adaptation_;
-  const bool enable_dtx_adaptation_;
-  const bool enable_fec_adaptation_;
-  const bool enable_channel_adaptation_;
-  const bool enable_frame_length_adaptation_;
-
   RTC_DISALLOW_COPY_AND_ASSIGN(AudioNetworkAdaptorImpl);
 };
 
diff --git a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl_unittest.cc b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl_unittest.cc
index 5948ac3..be9550a 100644
--- a/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl_unittest.cc
+++ b/modules/audio_coding/audio_network_adaptor/audio_network_adaptor_impl_unittest.cc
@@ -51,7 +51,7 @@
     return false;
   }
   auto ana_event = static_cast<RtcEventAudioNetworkAdaptation*>(arg);
-  return *ana_event->config_ == config;
+  return ana_event->config() == config;
 }
 
 MATCHER_P(EncoderRuntimeConfigIs, config, "") {
diff --git a/modules/audio_coding/audio_network_adaptor/event_log_writer_unittest.cc b/modules/audio_coding/audio_network_adaptor/event_log_writer_unittest.cc
index df97594..42189c3 100644
--- a/modules/audio_coding/audio_network_adaptor/event_log_writer_unittest.cc
+++ b/modules/audio_coding/audio_network_adaptor/event_log_writer_unittest.cc
@@ -36,7 +36,7 @@
     return false;
   }
   auto ana_event = static_cast<RtcEventAudioNetworkAdaptation*>(arg);
-  return *ana_event->config_ == config;
+  return ana_event->config() == config;
 }
 
 struct EventLogWriterStates {
diff --git a/modules/audio_coding/codecs/isac/main/source/entropy_coding.c b/modules/audio_coding/codecs/isac/main/source/entropy_coding.c
index 28767af..6692a51 100644
--- a/modules/audio_coding/codecs/isac/main/source/entropy_coding.c
+++ b/modules/audio_coding/codecs/isac/main/source/entropy_coding.c
@@ -96,7 +96,7 @@
                           const int32_t gainQ10,
                           int32_t* CurveQ16) {
   int32_t CorrQ11[AR_ORDER + 1];
-  int32_t sum, tmpGain;
+  int64_t sum, tmpGain;
   int32_t diffQ16[FRAMESAMPLES / 8];
   const int16_t* CS_ptrQ9;
   int k, n;
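
Widening |sum| and |tmpGain| to 64 bits protects the fixed-point accumulation in this routine from signed 32-bit overflow. A minimal illustration of the failure mode being avoided, with made-up values rather than anything taken from the iSAC code:

    // Accumulating 32-bit Q-domain products in an int32_t can overflow, which
    // is undefined behaviour; a 64-bit accumulator keeps the full result.
    int32_t corr_q11 = 1 << 20;  // hypothetical Q11 magnitude
    int32_t cs_q9 = 1 << 13;     // hypothetical Q9 table value
    int64_t sum = 0;
    sum += static_cast<int64_t>(corr_q11) * cs_q9;  // 2^33 fits in int64_t only
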
diff --git a/modules/audio_coding/codecs/opus/audio_encoder_opus.cc b/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
index adc6656..1a88acf 100644
--- a/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
+++ b/modules/audio_coding/codecs/opus/audio_encoder_opus.cc
@@ -471,6 +471,8 @@
     : payload_type_(payload_type),
       send_side_bwe_with_overhead_(
           webrtc::field_trial::IsEnabled("WebRTC-SendSideBwe-WithOverhead")),
+      use_link_capacity_for_adaptation_(webrtc::field_trial::IsEnabled(
+          "WebRTC-Audio-LinkCapacityAdaptation")),
       adjust_bandwidth_(
           webrtc::field_trial::IsEnabled("WebRTC-AdjustOpusBandwidth")),
       bitrate_changed_(true),
@@ -605,7 +607,8 @@
 
 void AudioEncoderOpusImpl::OnReceivedUplinkBandwidth(
     int target_audio_bitrate_bps,
-    absl::optional<int64_t> bwe_period_ms) {
+    absl::optional<int64_t> bwe_period_ms,
+    absl::optional<int64_t> link_capacity_allocation_bps) {
   if (audio_network_adaptor_) {
     audio_network_adaptor_->SetTargetAudioBitrate(target_audio_bitrate_bps);
     // We give smoothed bitrate allocation to audio network adaptor as
@@ -623,6 +626,9 @@
       bitrate_smoother_->SetTimeConstantMs(*bwe_period_ms * 4);
     bitrate_smoother_->AddSample(target_audio_bitrate_bps);
 
+    if (link_capacity_allocation_bps)
+      link_capacity_allocation_bps_ = link_capacity_allocation_bps;
+
     ApplyAudioNetworkAdaptor();
   } else if (send_side_bwe_with_overhead_) {
     if (!overhead_bytes_per_packet_) {
@@ -641,6 +647,18 @@
     SetTargetBitrate(target_audio_bitrate_bps);
   }
 }
+void AudioEncoderOpusImpl::OnReceivedUplinkBandwidth(
+    int target_audio_bitrate_bps,
+    absl::optional<int64_t> bwe_period_ms) {
+  OnReceivedUplinkBandwidth(target_audio_bitrate_bps, bwe_period_ms,
+                            absl::nullopt);
+}
+
+void AudioEncoderOpusImpl::OnReceivedUplinkAllocation(
+    BitrateAllocationUpdate update) {
+  OnReceivedUplinkBandwidth(update.target_bitrate.bps(), update.bwe_period.ms(),
+                            update.link_capacity.bps());
+}
 
 void AudioEncoderOpusImpl::OnReceivedRtt(int rtt_ms) {
   if (!audio_network_adaptor_)
@@ -875,14 +893,20 @@
 
 void AudioEncoderOpusImpl::MaybeUpdateUplinkBandwidth() {
   if (audio_network_adaptor_) {
-    int64_t now_ms = rtc::TimeMillis();
-    if (!bitrate_smoother_last_update_time_ ||
-        now_ms - *bitrate_smoother_last_update_time_ >=
-            config_.uplink_bandwidth_update_interval_ms) {
-      absl::optional<float> smoothed_bitrate = bitrate_smoother_->GetAverage();
-      if (smoothed_bitrate)
-        audio_network_adaptor_->SetUplinkBandwidth(*smoothed_bitrate);
-      bitrate_smoother_last_update_time_ = now_ms;
+    if (use_link_capacity_for_adaptation_ && link_capacity_allocation_bps_) {
+      audio_network_adaptor_->SetUplinkBandwidth(
+          *link_capacity_allocation_bps_);
+    } else {
+      int64_t now_ms = rtc::TimeMillis();
+      if (!bitrate_smoother_last_update_time_ ||
+          now_ms - *bitrate_smoother_last_update_time_ >=
+              config_.uplink_bandwidth_update_interval_ms) {
+        absl::optional<float> smoothed_bitrate =
+            bitrate_smoother_->GetAverage();
+        if (smoothed_bitrate)
+          audio_network_adaptor_->SetUplinkBandwidth(*smoothed_bitrate);
+        bitrate_smoother_last_update_time_ = now_ms;
+      }
     }
   }
 }
diff --git a/modules/audio_coding/codecs/opus/audio_encoder_opus.h b/modules/audio_coding/codecs/opus/audio_encoder_opus.h
index c26c6da..150423f 100644
--- a/modules/audio_coding/codecs/opus/audio_encoder_opus.h
+++ b/modules/audio_coding/codecs/opus/audio_encoder_opus.h
@@ -119,6 +119,7 @@
   void OnReceivedUplinkBandwidth(
       int target_audio_bitrate_bps,
       absl::optional<int64_t> bwe_period_ms) override;
+  void OnReceivedUplinkAllocation(BitrateAllocationUpdate update) override;
   void OnReceivedRtt(int rtt_ms) override;
   void OnReceivedOverhead(size_t overhead_bytes_per_packet) override;
   void SetReceiverFrameLengthRange(int min_frame_length_ms,
@@ -164,6 +165,11 @@
   void SetNumChannelsToEncode(size_t num_channels_to_encode);
   void SetProjectedPacketLossRate(float fraction);
 
+  void OnReceivedUplinkBandwidth(
+      int target_audio_bitrate_bps,
+      absl::optional<int64_t> bwe_period_ms,
+      absl::optional<int64_t> link_capacity_allocation);
+
   // TODO(minyue): remove "override" when we can deprecate
   // |AudioEncoder::SetTargetBitrate|.
   void SetTargetBitrate(int target_bps) override;
@@ -178,6 +184,7 @@
   AudioEncoderOpusConfig config_;
   const int payload_type_;
   const bool send_side_bwe_with_overhead_;
+  const bool use_link_capacity_for_adaptation_;
   const bool adjust_bandwidth_;
   bool bitrate_changed_;
   float packet_loss_rate_;
@@ -195,6 +202,7 @@
   absl::optional<size_t> overhead_bytes_per_packet_;
   const std::unique_ptr<SmoothingFilter> bitrate_smoother_;
   absl::optional<int64_t> bitrate_smoother_last_update_time_;
+  absl::optional<int64_t> link_capacity_allocation_bps_;
   int consecutive_dtx_frames_;
 
   friend struct AudioEncoderOpus;
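
Together these encoder changes let the bitrate allocator's link capacity drive the audio network adaptor when the "WebRTC-Audio-LinkCapacityAdaptation" field trial is enabled; otherwise the smoothed-BWE path is kept. A rough usage sketch — the |encoder| pointer and the DataRate/TimeDelta factory spellings are assumptions, the field names come from the diff above:

    // Assumes the trial was enabled elsewhere, e.g. with
    // webrtc::field_trial::InitFieldTrialsFromString(
    //     "WebRTC-Audio-LinkCapacityAdaptation/Enabled/");
    webrtc::BitrateAllocationUpdate update;
    update.target_bitrate = webrtc::DataRate::bps(32000);
    update.bwe_period = webrtc::TimeDelta::ms(3000);
    update.link_capacity = webrtc::DataRate::bps(48000);
    // Stores the link capacity and, when the trial is on, feeds it to the
    // audio network adaptor instead of the smoothed BWE estimate.
    encoder->OnReceivedUplinkAllocation(update);
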
diff --git a/modules/audio_coding/include/audio_coding_module.h b/modules/audio_coding/include/audio_coding_module.h
index b9f2228..f9fdba5 100644
--- a/modules/audio_coding/include/audio_coding_module.h
+++ b/modules/audio_coding/include/audio_coding_module.h
@@ -154,40 +154,6 @@
   //   Sender
   //
 
-  ///////////////////////////////////////////////////////////////////////////
-  // int32_t RegisterSendCodec()
-  // Registers a codec, specified by |send_codec|, as sending codec.
-  // This API can be called multiple of times to register Codec. The last codec
-  // registered overwrites the previous ones.
-  // The API can also be used to change payload type for CNG and RED, which are
-  // registered by default to default payload types.
-  // Note that registering CNG and RED won't overwrite speech codecs.
-  // This API can be called to set/change the send payload-type, frame-size
-  // or encoding rate (if applicable for the codec).
-  //
-  // Note: If a stereo codec is registered as send codec, VAD/DTX will
-  // automatically be turned off, since it is not supported for stereo sending.
-  //
-  // Note: If a secondary encoder is already registered, and the new send-codec
-  // has a sampling rate that does not match the secondary encoder, the
-  // secondary encoder will be unregistered.
-  //
-  // Input:
-  //   -send_codec         : Parameters of the codec to be registered, c.f.
-  //                         common_types.h for the definition of
-  //                         CodecInst.
-  //
-  // Return value:
-  //   -1 if failed to initialize,
-  //    0 if succeeded.
-  //
-  virtual int32_t RegisterSendCodec(const CodecInst& send_codec) = 0;
-
-  // Registers |external_speech_encoder| as encoder. The new encoder will
-  // replace any previously registered speech encoder (internal or external).
-  virtual void RegisterExternalSendCodec(
-      AudioEncoder* external_speech_encoder) = 0;
-
   // |modifier| is called exactly once with one argument: a pointer to the
   // unique_ptr that holds the current encoder (which is null if there is no
   // current encoder). For the duration of the call, |modifier| has exclusive
@@ -258,71 +224,6 @@
   virtual int32_t Add10MsData(const AudioFrame& audio_frame) = 0;
 
   ///////////////////////////////////////////////////////////////////////////
-  // (RED) Redundant Coding
-  //
-
-  ///////////////////////////////////////////////////////////////////////////
-  // int32_t SetREDStatus()
-  // configure RED status i.e. on/off.
-  //
-  // RFC 2198 describes a solution which has a single payload type which
-  // signifies a packet with redundancy. That packet then becomes a container,
-  // encapsulating multiple payloads into a single RTP packet.
-  // Such a scheme is flexible, since any amount of redundancy may be
-  // encapsulated within a single packet.  There is, however, a small overhead
-  // since each encapsulated payload must be preceded by a header indicating
-  // the type of data enclosed.
-  //
-  // Input:
-  //   -enable_red         : if true RED is enabled, otherwise RED is
-  //                         disabled.
-  //
-  // Return value:
-  //   -1 if failed to set RED status,
-  //    0 if succeeded.
-  //
-  virtual int32_t SetREDStatus(bool enable_red) = 0;
-
-  ///////////////////////////////////////////////////////////////////////////
-  // bool REDStatus()
-  // Get RED status
-  //
-  // Return value:
-  //   true if RED is enabled,
-  //   false if RED is disabled.
-  //
-  virtual bool REDStatus() const = 0;
-
-  ///////////////////////////////////////////////////////////////////////////
-  // (FEC) Forward Error Correction (codec internal)
-  //
-
-  ///////////////////////////////////////////////////////////////////////////
-  // int32_t SetCodecFEC()
-  // Configures codec internal FEC status i.e. on/off. No effects on codecs that
-  // do not provide internal FEC.
-  //
-  // Input:
-  //   -enable_fec         : if true FEC will be enabled otherwise the FEC is
-  //                         disabled.
-  //
-  // Return value:
-  //   -1 if failed, or the codec does not support FEC
-  //    0 if succeeded.
-  //
-  virtual int SetCodecFEC(bool enable_codec_fec) = 0;
-
-  ///////////////////////////////////////////////////////////////////////////
-  // bool CodecFEC()
-  // Gets status of codec internal FEC.
-  //
-  // Return value:
-  //   true if FEC is enabled,
-  //   false if FEC is disabled.
-  //
-  virtual bool CodecFEC() const = 0;
-
-  ///////////////////////////////////////////////////////////////////////////
   // int SetPacketLossRate()
   // Sets expected packet loss rate for encoding. Some encoders provide packet
   // loss gnostic encoding to make stream less sensitive to packet losses,
@@ -344,55 +245,6 @@
   //
 
   ///////////////////////////////////////////////////////////////////////////
-  // int32_t SetVAD()
-  // If DTX is enabled & the codec does not have internal DTX/VAD
-  // WebRtc VAD will be automatically enabled and |enable_vad| is ignored.
-  //
-  // If DTX is disabled but VAD is enabled no DTX packets are send,
-  // regardless of whether the codec has internal DTX/VAD or not. In this
-  // case, WebRtc VAD is running to label frames as active/in-active.
-  //
-  // NOTE! VAD/DTX is not supported when sending stereo.
-  //
-  // Inputs:
-  //   -enable_dtx         : if true DTX is enabled,
-  //                         otherwise DTX is disabled.
-  //   -enable_vad         : if true VAD is enabled,
-  //                         otherwise VAD is disabled.
-  //   -vad_mode           : determines the aggressiveness of VAD. A more
-  //                         aggressive mode results in more frames labeled
-  //                         as in-active, c.f. definition of
-  //                         ACMVADMode in audio_coding_module_typedefs.h
-  //                         for valid values.
-  //
-  // Return value:
-  //   -1 if failed to set up VAD/DTX,
-  //    0 if succeeded.
-  //
-  virtual int32_t SetVAD(const bool enable_dtx = true,
-                         const bool enable_vad = false,
-                         const ACMVADMode vad_mode = VADNormal) = 0;
-
-  ///////////////////////////////////////////////////////////////////////////
-  // int32_t VAD()
-  // Get VAD status.
-  //
-  // Outputs:
-  //   -dtx_enabled        : is set to true if DTX is enabled, otherwise
-  //                         is set to false.
-  //   -vad_enabled        : is set to true if VAD is enabled, otherwise
-  //                         is set to false.
-  //   -vad_mode            : is set to the current aggressiveness of VAD.
-  //
-  // Return value:
-  //   -1 if fails to retrieve the setting of DTX/VAD,
-  //    0 if succeeded.
-  //
-  virtual int32_t VAD(bool* dtx_enabled,
-                      bool* vad_enabled,
-                      ACMVADMode* vad_mode) const = 0;
-
-  ///////////////////////////////////////////////////////////////////////////
   // int32_t RegisterVADCallback()
   // Call this method to register a callback function which is called
   // any time that ACM encounters an empty frame. That is a frame which is
@@ -455,29 +307,6 @@
   virtual bool RegisterReceiveCodec(int rtp_payload_type,
                                     const SdpAudioFormat& audio_format) = 0;
 
-  ///////////////////////////////////////////////////////////////////////////
-  // int32_t RegisterReceiveCodec()
-  // Register possible decoders, can be called multiple times for
-  // codecs, CNG-NB, CNG-WB, CNG-SWB, AVT and RED.
-  //
-  // Input:
-  //   -receive_codec      : parameters of the codec to be registered, c.f.
-  //                         common_types.h for the definition of
-  //                         CodecInst.
-  //
-  // Return value:
-  //   -1 if failed to register the codec
-  //    0 if the codec registered successfully.
-  //
-  virtual int RegisterReceiveCodec(const CodecInst& receive_codec) = 0;
-
-  // Register a decoder; call repeatedly to register multiple decoders. |df| is
-  // a decoder factory that returns an iSAC decoder; it will be called once if
-  // the decoder being registered is iSAC.
-  virtual int RegisterReceiveCodec(
-      const CodecInst& receive_codec,
-      rtc::FunctionView<std::unique_ptr<AudioDecoder>()> isac_factory) = 0;
-
   // Registers an external decoder. The name is only used to provide information
   // back to the caller about the decoder. Hence, the name is arbitrary, and may
   // be empty.
diff --git a/modules/audio_coding/include/audio_coding_module_typedefs.h b/modules/audio_coding/include/audio_coding_module_typedefs.h
index cd4351b..bafff72 100644
--- a/modules/audio_coding/include/audio_coding_module_typedefs.h
+++ b/modules/audio_coding/include/audio_coding_module_typedefs.h
@@ -13,6 +13,8 @@
 
 #include <map>
 
+#include "rtc_base/deprecation.h"
+
 namespace webrtc {
 
 ///////////////////////////////////////////////////////////////////////////
@@ -43,6 +45,84 @@
   kAudio = 1,
 };
 
+// Statistics for calls to AudioCodingModule::PlayoutData10Ms().
+struct AudioDecodingCallStats {
+  AudioDecodingCallStats()
+      : calls_to_silence_generator(0),
+        calls_to_neteq(0),
+        decoded_normal(0),
+        decoded_plc(0),
+        decoded_cng(0),
+        decoded_plc_cng(0),
+        decoded_muted_output(0) {}
+
+  int calls_to_silence_generator;  // Number of calls where silence was
+                                   // generated and NetEq was disengaged from
+                                   // decoding.
+  int calls_to_neteq;              // Number of calls to NetEq.
+  int decoded_normal;  // Number of calls where an audio RTP packet was decoded.
+  int decoded_plc;     // Number of calls that resulted in PLC.
+  int decoded_cng;  // Number of calls where comfort noise was generated due to
+                    // DTX.
+  int decoded_plc_cng;       // Number of calls where PLC faded to CNG.
+  int decoded_muted_output;  // Number of calls returning a muted state output.
+};
+
+// NETEQ statistics.
+struct NetworkStatistics {
+  // current jitter buffer size in ms
+  uint16_t currentBufferSize;
+  // preferred (optimal) buffer size in ms
+  uint16_t preferredBufferSize;
+  // adding extra delay due to "peaky jitter"
+  bool jitterPeaksFound;
+  // Stats below correspond to similarly-named fields in the WebRTC stats spec.
+  // https://w3c.github.io/webrtc-stats/#dom-rtcmediastreamtrackstats
+  uint64_t totalSamplesReceived;
+  uint64_t concealedSamples;
+  uint64_t concealmentEvents;
+  uint64_t jitterBufferDelayMs;
+  // Stats below DO NOT correspond directly to anything in the WebRTC stats
+  // spec.
+  // Loss rate (network + late); fraction between 0 and 1, scaled to Q14.
+  uint16_t currentPacketLossRate;
+  // Late loss rate; fraction between 0 and 1, scaled to Q14.
+  union {
+    RTC_DEPRECATED uint16_t currentDiscardRate;
+  };
+  // fraction (of original stream) of synthesized audio inserted through
+  // expansion (in Q14)
+  uint16_t currentExpandRate;
+  // fraction (of original stream) of synthesized speech inserted through
+  // expansion (in Q14)
+  uint16_t currentSpeechExpandRate;
+  // fraction of synthesized speech inserted through pre-emptive expansion
+  // (in Q14)
+  uint16_t currentPreemptiveRate;
+  // fraction of data removed through acceleration (in Q14)
+  uint16_t currentAccelerateRate;
+  // fraction of data coming from secondary decoding (in Q14)
+  uint16_t currentSecondaryDecodedRate;
+  // Fraction of secondary data, including FEC and RED, that is discarded (in
+  // Q14). Discarding of secondary data can be caused by the reception of the
+  // primary data, obsoleting the secondary data. It can also be caused by early
+  // or late arrival of secondary data.
+  uint16_t currentSecondaryDiscardedRate;
+  // clock-drift in parts-per-million (negative or positive)
+  int32_t clockDriftPPM;
+  // average packet waiting time in the jitter buffer (ms)
+  int meanWaitingTimeMs;
+  // median packet waiting time in the jitter buffer (ms)
+  int medianWaitingTimeMs;
+  // min packet waiting time in the jitter buffer (ms)
+  int minWaitingTimeMs;
+  // max packet waiting time in the jitter buffer (ms)
+  int maxWaitingTimeMs;
+  // added samples in off mode due to packet loss
+  size_t addedSamples;
+  // count of the number of buffer flushes
+  uint64_t packetBufferFlushes;
+  // number of samples expanded due to delayed packets
+  uint64_t delayedPacketOutageSamples;
+};
+
 }  // namespace webrtc
 
 #endif  // MODULES_AUDIO_CODING_INCLUDE_AUDIO_CODING_MODULE_TYPEDEFS_H_
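
Several of the NetworkStatistics fields above are fractions scaled to Q14, where 1 << 14 (16384) stands for the whole original stream. A small conversion helper for readers of these stats, not part of the header:

    // Turns a Q14 fraction such as currentExpandRate into a percentage.
    inline float Q14ToPercent(uint16_t q14_fraction) {
      return 100.0f * static_cast<float>(q14_fraction) / (1 << 14);
    }
    // Example: currentExpandRate == 1638 means roughly 10% expanded audio.
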
diff --git a/modules/audio_coding/neteq/decision_logic_unittest.cc b/modules/audio_coding/neteq/decision_logic_unittest.cc
index 08720d1..183b9c7 100644
--- a/modules/audio_coding/neteq/decision_logic_unittest.cc
+++ b/modules/audio_coding/neteq/decision_logic_unittest.cc
@@ -31,7 +31,7 @@
   TickTimer tick_timer;
   PacketBuffer packet_buffer(10, &tick_timer);
   DelayPeakDetector delay_peak_detector(&tick_timer);
-  DelayManager delay_manager(240, &delay_peak_detector, &tick_timer);
+  DelayManager delay_manager(240, 0, &delay_peak_detector, &tick_timer);
   BufferLevelFilter buffer_level_filter;
   DecisionLogic* logic = DecisionLogic::Create(
       fs_hz, output_size_samples, false, &decoder_database, packet_buffer,
@@ -48,7 +48,7 @@
   TickTimer tick_timer;
   PacketBuffer packet_buffer(10, &tick_timer);
   DelayPeakDetector delay_peak_detector(&tick_timer);
-  DelayManager delay_manager(240, &delay_peak_detector, &tick_timer);
+  DelayManager delay_manager(240, 0, &delay_peak_detector, &tick_timer);
   BufferLevelFilter buffer_level_filter;
   {
     test::ScopedFieldTrials field_trial(
diff --git a/modules/audio_coding/neteq/delay_manager.cc b/modules/audio_coding/neteq/delay_manager.cc
index 628812a..67e6a13 100644
--- a/modules/audio_coding/neteq/delay_manager.cc
+++ b/modules/audio_coding/neteq/delay_manager.cc
@@ -62,6 +62,7 @@
 namespace webrtc {
 
 DelayManager::DelayManager(size_t max_packets_in_buffer,
+                           int base_min_target_delay_ms,
                            DelayPeakDetector* peak_detector,
                            const TickTimer* tick_timer)
     : first_packet_received_(false),
@@ -69,13 +70,14 @@
       iat_vector_(kMaxIat + 1, 0),
       iat_factor_(0),
       tick_timer_(tick_timer),
+      base_min_target_delay_ms_(base_min_target_delay_ms),
       base_target_level_(4),                   // In Q0 domain.
       target_level_(base_target_level_ << 8),  // In Q8 domain.
       packet_len_ms_(0),
       streaming_mode_(false),
       last_seq_no_(0),
       last_timestamp_(0),
-      minimum_delay_ms_(0),
+      minimum_delay_ms_(base_min_target_delay_ms_),
       maximum_delay_ms_(target_level_),
       iat_cumulative_sum_(0),
       max_iat_cumulative_sum_(0),
@@ -85,6 +87,8 @@
           field_trial::IsEnabled("WebRTC-Audio-NetEqFramelengthExperiment")),
       forced_limit_probability_(GetForcedLimitProbability()) {
   assert(peak_detector);  // Should never be NULL.
+  RTC_DCHECK_GE(base_min_target_delay_ms_, 0);
+  RTC_DCHECK_LE(minimum_delay_ms_, maximum_delay_ms_);
 
   Reset();
 }
@@ -485,7 +489,7 @@
            static_cast<int>(3 * max_packets_in_buffer_ * packet_len_ms_ / 4))) {
     return false;
   }
-  minimum_delay_ms_ = delay_ms;
+  minimum_delay_ms_ = std::max(delay_ms, base_min_target_delay_ms_);
   return true;
 }
 
diff --git a/modules/audio_coding/neteq/delay_manager.h b/modules/audio_coding/neteq/delay_manager.h
index cd5fc09..2c8081b 100644
--- a/modules/audio_coding/neteq/delay_manager.h
+++ b/modules/audio_coding/neteq/delay_manager.h
@@ -31,9 +31,11 @@
 
   // Create a DelayManager object. Notify the delay manager that the packet
   // buffer can hold no more than |max_packets_in_buffer| packets (i.e., this
-  // is the number of packet slots in the buffer). Supply a PeakDetector
-  // object to the DelayManager.
+  // is the number of packet slots in the buffer) and that the target delay
+  // should be greater than or equal to |base_min_target_delay_ms|. Supply a
+  // PeakDetector object to the DelayManager.
   DelayManager(size_t max_packets_in_buffer,
+               int base_min_target_delay_ms,
                DelayPeakDetector* peak_detector,
                const TickTimer* tick_timer);
 
@@ -144,6 +146,8 @@
   IATVector iat_vector_;                // Histogram of inter-arrival times.
   int iat_factor_;  // Forgetting factor for updating the IAT histogram (Q15).
   const TickTimer* tick_timer_;
+  const int base_min_target_delay_ms_;  // Lower bound for target_level_ and
+                                        // minimum_delay_ms_.
   // Time elapsed since last packet.
   std::unique_ptr<TickTimer::Stopwatch> packet_iat_stopwatch_;
   int base_target_level_;  // Currently preferred buffer level before peak
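
A short sketch of the new lower bound in use; the constructor arguments mirror the unit tests elsewhere in this change, and the concrete delay values are made up:

    TickTimer tick_timer;
    DelayPeakDetector peak_detector(&tick_timer);
    // Neither the target level nor an application-set minimum delay can drop
    // below 100 ms.
    DelayManager dm(/*max_packets_in_buffer=*/240,
                    /*base_min_target_delay_ms=*/100, &peak_detector,
                    &tick_timer);
    dm.SetMinimumDelay(40);   // Effective minimum stays at 100 ms.
    dm.SetMinimumDelay(150);  // Raises the effective minimum to 150 ms.
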
diff --git a/modules/audio_coding/neteq/delay_manager_unittest.cc b/modules/audio_coding/neteq/delay_manager_unittest.cc
index e4e865f..6281a15 100644
--- a/modules/audio_coding/neteq/delay_manager_unittest.cc
+++ b/modules/audio_coding/neteq/delay_manager_unittest.cc
@@ -27,6 +27,7 @@
 class DelayManagerTest : public ::testing::Test {
  protected:
   static const int kMaxNumberOfPackets = 240;
+  static const int kMinDelayMs = 0;
   static const int kTimeStepMs = 10;
   static const int kFs = 8000;
   static const int kFrameSizeMs = 20;
@@ -56,7 +57,8 @@
 
 void DelayManagerTest::RecreateDelayManager() {
   EXPECT_CALL(detector_, Reset()).Times(1);
-  dm_.reset(new DelayManager(kMaxNumberOfPackets, &detector_, &tick_timer_));
+  dm_.reset(new DelayManager(kMaxNumberOfPackets, kMinDelayMs, &detector_,
+                             &tick_timer_));
 }
 
 void DelayManagerTest::SetPacketAudioLength(int lengt_ms) {
diff --git a/modules/audio_coding/neteq/expand.cc b/modules/audio_coding/neteq/expand.cc
index 97ce529..4a06d09 100644
--- a/modules/audio_coding/neteq/expand.cc
+++ b/modules/audio_coding/neteq/expand.cc
@@ -323,8 +323,7 @@
   current_lag_index_ = 0;
   lag_index_direction_ = 0;
   stop_muting_ = true;  // Do not mute signal any more.
-  statistics_->LogDelayedPacketOutageEvent(
-      rtc::dchecked_cast<int>(expand_duration_samples_) / (fs_hz_ / 1000));
+  statistics_->LogDelayedPacketOutageEvent(expand_duration_samples_, fs_hz_);
 }
 
 void Expand::SetParametersForMergeAfterExpand() {
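
LogDelayedPacketOutageEvent now takes the raw sample count and the sample rate instead of a precomputed duration, so the millisecond conversion moves to the statistics consumer. The equivalent of the removed in-place conversion, with |num_samples| and |fs_hz| standing in for the new arguments:

    int outage_ms = rtc::dchecked_cast<int>(num_samples) / (fs_hz / 1000);
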
diff --git a/modules/audio_coding/neteq/expand_unittest.cc b/modules/audio_coding/neteq/expand_unittest.cc
index b4e6466..09914da 100644
--- a/modules/audio_coding/neteq/expand_unittest.cc
+++ b/modules/audio_coding/neteq/expand_unittest.cc
@@ -51,14 +51,16 @@
 namespace {
 class FakeStatisticsCalculator : public StatisticsCalculator {
  public:
-  void LogDelayedPacketOutageEvent(int outage_duration_ms) override {
-    last_outage_duration_ms_ = outage_duration_ms;
+  void LogDelayedPacketOutageEvent(int num_samples, int fs_hz) override {
+    last_outage_duration_samples_ = num_samples;
   }
 
-  int last_outage_duration_ms() const { return last_outage_duration_ms_; }
+  int last_outage_duration_samples() const {
+    return last_outage_duration_samples_;
+  }
 
  private:
-  int last_outage_duration_ms_ = 0;
+  int last_outage_duration_samples_ = 0;
 };
 
 // This is the same size that is given to the SyncBuffer object in NetEq.
@@ -120,13 +122,12 @@
     EXPECT_EQ(0, expand_.Process(&output));
     EXPECT_GT(output.Size(), 0u);
     sum_output_len_samples += output.Size();
-    EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+    EXPECT_EQ(0, statistics_.last_outage_duration_samples());
   }
   expand_.SetParametersForNormalAfterExpand();
   // Convert |sum_output_len_samples| to milliseconds.
-  EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples /
-                                   (test_sample_rate_hz_ / 1000)),
-            statistics_.last_outage_duration_ms());
+  EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples),
+            statistics_.last_outage_duration_samples());
 }
 
 // This test is similar to DelayedPacketOutage, but ends by calling
@@ -140,10 +141,10 @@
     EXPECT_EQ(0, expand_.Process(&output));
     EXPECT_GT(output.Size(), 0u);
     sum_output_len_samples += output.Size();
-    EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+    EXPECT_EQ(0, statistics_.last_outage_duration_samples());
   }
   expand_.SetParametersForMergeAfterExpand();
-  EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+  EXPECT_EQ(0, statistics_.last_outage_duration_samples());
 }
 
 // This test is similar to the DelayedPacketOutage test above, but with the
@@ -161,13 +162,12 @@
       expand_.Reset();
       sum_output_len_samples = 0;
     }
-    EXPECT_EQ(0, statistics_.last_outage_duration_ms());
+    EXPECT_EQ(0, statistics_.last_outage_duration_samples());
   }
   expand_.SetParametersForNormalAfterExpand();
   // Convert |sum_output_len_samples| to milliseconds.
-  EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples /
-                                   (test_sample_rate_hz_ / 1000)),
-            statistics_.last_outage_duration_ms());
+  EXPECT_EQ(rtc::checked_cast<int>(sum_output_len_samples),
+            statistics_.last_outage_duration_samples());
 }
 
 namespace {
diff --git a/modules/audio_coding/neteq/include/neteq.h b/modules/audio_coding/neteq/include/neteq.h
index 530975f..2820fd8 100644
--- a/modules/audio_coding/neteq/include/neteq.h
+++ b/modules/audio_coding/neteq/include/neteq.h
@@ -72,6 +72,7 @@
   uint64_t jitter_buffer_delay_ms = 0;
   // Below stat is not part of the spec.
   uint64_t voice_concealed_samples = 0;
+  uint64_t delayed_packet_outage_samples = 0;
 };
 
 // Metrics that describe the operations performed in NetEq, and the internal
@@ -112,6 +113,7 @@
     bool enable_post_decode_vad = false;
     size_t max_packets_in_buffer = 50;
     int max_delay_ms = 2000;
+    int min_delay_ms = 0;
     bool enable_fast_accelerate = false;
     bool enable_muted_state = false;
     absl::optional<AudioCodecPairId> codec_pair_id;
@@ -231,13 +233,6 @@
   // statistics are never reset.
   virtual NetEqOperationsAndState GetOperationsAndState() const = 0;
 
-  // Writes the current RTCP statistics to |stats|. The statistics are reset
-  // and a new report period is started with the call.
-  virtual void GetRtcpStatistics(RtcpStatistics* stats) = 0;
-
-  // Same as RtcpStatistics(), but does not reset anything.
-  virtual void GetRtcpStatisticsNoReset(RtcpStatistics* stats) = 0;
-
   // Enables post-decode VAD. When enabled, GetAudio() will return
   // kOutputVADPassive when the signal contains no speech.
   virtual void EnableVad() = 0;
@@ -266,10 +261,6 @@
   // Flushes both the packet buffer and the sync buffer.
   virtual void FlushBuffers() = 0;
 
-  // Current usage of packet-buffer and it's limits.
-  virtual void PacketBufferStatistics(int* current_num_packets,
-                                      int* max_num_packets) const = 0;
-
   // Enables NACK and sets the maximum size of the NACK list, which should be
   // positive and no larger than Nack::kNackListSizeLimit. If NACK is already
   // enabled then the maximum NACK list size is modified accordingly.
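
The new Config::min_delay_ms is forwarded to DelayManager as |base_min_target_delay_ms| (see the neteq_impl.cc hunk below), giving embedders a floor on the adaptive target delay. A hedged configuration sketch; the Create() call follows the existing NetEq factory API, which is not shown in this diff:

    NetEq::Config config;
    config.sample_rate_hz = 16000;
    config.min_delay_ms = 80;    // Never target less than 80 ms of buffering.
    config.max_delay_ms = 2000;
    std::unique_ptr<NetEq> neteq(
        NetEq::Create(config, CreateBuiltinAudioDecoderFactory()));
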
diff --git a/modules/audio_coding/neteq/mock/mock_delay_manager.h b/modules/audio_coding/neteq/mock/mock_delay_manager.h
index 9b2ed49..206cea7 100644
--- a/modules/audio_coding/neteq/mock/mock_delay_manager.h
+++ b/modules/audio_coding/neteq/mock/mock_delay_manager.h
@@ -20,9 +20,13 @@
 class MockDelayManager : public DelayManager {
  public:
   MockDelayManager(size_t max_packets_in_buffer,
+                   int base_min_target_delay_ms,
                    DelayPeakDetector* peak_detector,
                    const TickTimer* tick_timer)
-      : DelayManager(max_packets_in_buffer, peak_detector, tick_timer) {}
+      : DelayManager(max_packets_in_buffer,
+                     base_min_target_delay_ms,
+                     peak_detector,
+                     tick_timer) {}
   virtual ~MockDelayManager() { Die(); }
   MOCK_METHOD0(Die, void());
   MOCK_CONST_METHOD0(iat_vector, const IATVector&());
diff --git a/modules/audio_coding/neteq/neteq_impl.cc b/modules/audio_coding/neteq/neteq_impl.cc
index 6a2cbae..2a025f3 100644
--- a/modules/audio_coding/neteq/neteq_impl.cc
+++ b/modules/audio_coding/neteq/neteq_impl.cc
@@ -63,6 +63,7 @@
           new DecoderDatabase(decoder_factory, config.codec_pair_id)),
       delay_peak_detector(new DelayPeakDetector(tick_timer.get())),
       delay_manager(new DelayManager(config.max_packets_in_buffer,
+                                     config.min_delay_ms,
                                      delay_peak_detector.get(),
                                      tick_timer.get())),
       dtmf_buffer(new DtmfBuffer(config.sample_rate_hz)),
@@ -387,20 +388,6 @@
   return result;
 }
 
-void NetEqImpl::GetRtcpStatistics(RtcpStatistics* stats) {
-  rtc::CritScope lock(&crit_sect_);
-  if (stats) {
-    rtcp_.GetStatistics(false, stats);
-  }
-}
-
-void NetEqImpl::GetRtcpStatisticsNoReset(RtcpStatistics* stats) {
-  rtc::CritScope lock(&crit_sect_);
-  if (stats) {
-    rtcp_.GetStatistics(true, stats);
-  }
-}
-
 void NetEqImpl::EnableVad() {
   rtc::CritScope lock(&crit_sect_);
   assert(vad_.get());
@@ -475,12 +462,6 @@
   first_packet_ = true;
 }
 
-void NetEqImpl::PacketBufferStatistics(int* current_num_packets,
-                                       int* max_num_packets) const {
-  rtc::CritScope lock(&crit_sect_);
-  packet_buffer_->BufferStat(current_num_packets, max_num_packets);
-}
-
 void NetEqImpl::EnableNack(size_t max_nack_list_size) {
   rtc::CritScope lock(&crit_sect_);
   if (!nack_enabled_) {
@@ -576,8 +557,6 @@
     // Note: |first_packet_| will be cleared further down in this method, once
     // the packet has been successfully inserted into the packet buffer.
 
-    rtcp_.Init(rtp_header.sequenceNumber);
-
     // Flush the packet buffer and DTMF buffer.
     packet_buffer_->Flush();
     dtmf_buffer_->Flush();
@@ -592,9 +571,6 @@
     timestamp_ = main_timestamp;
   }
 
-  // Update RTCP statistics, only for regular packets.
-  rtcp_.Update(rtp_header, receive_timestamp);
-
   if (nack_enabled_) {
     RTC_DCHECK(nack_);
     if (update_sample_rate_and_channels) {
diff --git a/modules/audio_coding/neteq/neteq_impl.h b/modules/audio_coding/neteq/neteq_impl.h
index 36990fb..525ae61 100644
--- a/modules/audio_coding/neteq/neteq_impl.h
+++ b/modules/audio_coding/neteq/neteq_impl.h
@@ -22,7 +22,6 @@
 #include "modules/audio_coding/neteq/include/neteq.h"
 #include "modules/audio_coding/neteq/packet.h"
 #include "modules/audio_coding/neteq/random_vector.h"
-#include "modules/audio_coding/neteq/rtcp.h"
 #include "modules/audio_coding/neteq/statistics_calculator.h"
 #include "modules/audio_coding/neteq/tick_timer.h"
 #include "rtc_base/constructormagic.h"
@@ -170,17 +169,10 @@
   // after the call.
   int NetworkStatistics(NetEqNetworkStatistics* stats) override;
 
-  // Writes the current RTCP statistics to |stats|. The statistics are reset
-  // and a new report period is started with the call.
-  void GetRtcpStatistics(RtcpStatistics* stats) override;
-
   NetEqLifetimeStatistics GetLifetimeStatistics() const override;
 
   NetEqOperationsAndState GetOperationsAndState() const override;
 
-  // Same as RtcpStatistics(), but does not reset anything.
-  void GetRtcpStatisticsNoReset(RtcpStatistics* stats) override;
-
   // Enables post-decode VAD. When enabled, GetAudio() will return
   // kOutputVADPassive when the signal contains no speech.
   void EnableVad() override;
@@ -200,9 +192,6 @@
   // Flushes both the packet buffer and the sync buffer.
   void FlushBuffers() override;
 
-  void PacketBufferStatistics(int* current_num_packets,
-                              int* max_num_packets) const override;
-
   void EnableNack(size_t max_nack_list_size) override;
 
   void DisableNack() override;
@@ -395,7 +384,6 @@
       RTC_GUARDED_BY(crit_sect_);
   RandomVector random_vector_ RTC_GUARDED_BY(crit_sect_);
   std::unique_ptr<ComfortNoise> comfort_noise_ RTC_GUARDED_BY(crit_sect_);
-  Rtcp rtcp_ RTC_GUARDED_BY(crit_sect_);
   StatisticsCalculator stats_ RTC_GUARDED_BY(crit_sect_);
   int fs_hz_ RTC_GUARDED_BY(crit_sect_);
   int fs_mult_ RTC_GUARDED_BY(crit_sect_);
diff --git a/modules/audio_coding/neteq/neteq_impl_unittest.cc b/modules/audio_coding/neteq/neteq_impl_unittest.cc
index b772dfa..0e087c8 100644
--- a/modules/audio_coding/neteq/neteq_impl_unittest.cc
+++ b/modules/audio_coding/neteq/neteq_impl_unittest.cc
@@ -92,7 +92,8 @@
 
     if (use_mock_delay_manager_) {
       std::unique_ptr<MockDelayManager> mock(new MockDelayManager(
-          config_.max_packets_in_buffer, delay_peak_detector_, tick_timer_));
+          config_.max_packets_in_buffer, config_.min_delay_ms,
+          delay_peak_detector_, tick_timer_));
       mock_delay_manager_ = mock.get();
       EXPECT_CALL(*mock_delay_manager_, set_streaming_mode(false)).Times(1);
       deps.delay_manager = std::move(mock);
diff --git a/modules/audio_coding/neteq/neteq_unittest.cc b/modules/audio_coding/neteq/neteq_unittest.cc
index 1c9b9e7..e8b5023 100644
--- a/modules/audio_coding/neteq/neteq_unittest.cc
+++ b/modules/audio_coding/neteq/neteq_unittest.cc
@@ -22,12 +22,12 @@
 
 #include "api/audio/audio_frame.h"
 #include "api/audio_codecs/builtin_audio_decoder_factory.h"
-#include "common_types.h"  // NOLINT(build/include)
 #include "modules/audio_coding/codecs/pcm16b/pcm16b.h"
 #include "modules/audio_coding/neteq/tools/audio_loop.h"
 #include "modules/audio_coding/neteq/tools/neteq_packet_source_input.h"
 #include "modules/audio_coding/neteq/tools/neteq_test.h"
 #include "modules/audio_coding/neteq/tools/rtp_file_source.h"
+#include "modules/rtp_rtcp/include/rtcp_statistics.h"
 #include "rtc_base/ignore_wundef.h"
 #include "rtc_base/messagedigest.h"
 #include "rtc_base/numerics/safe_conversions.h"
@@ -234,7 +234,14 @@
   buffer.resize(digest_->Size());
   digest_->Finish(&buffer[0], buffer.size());
   const std::string result = rtc::hex_encode(&buffer[0], digest_->Size());
-  EXPECT_EQ(checksum, result);
+  if (checksum.size() == result.size()) {
+    EXPECT_EQ(checksum, result);
+  } else {
+    // Check that result is one of the '|'-separated checksums.
+    EXPECT_NE(checksum.find(result), std::string::npos)
+        << result << " should be one of these:\n"
+        << checksum;
+  }
 }
 
 class NetEqDecodingTest : public ::testing::Test {
@@ -258,7 +265,6 @@
   void DecodeAndCompare(const std::string& rtp_file,
                         const std::string& output_checksum,
                         const std::string& network_stats_checksum,
-                        const std::string& rtcp_stats_checksum,
                         bool gen_ref);
 
   static void PopulateRtpInfo(int frame_index,
@@ -366,7 +372,6 @@
     const std::string& rtp_file,
     const std::string& output_checksum,
     const std::string& network_stats_checksum,
-    const std::string& rtcp_stats_checksum,
     bool gen_ref) {
   OpenInputFile(rtp_file);
 
@@ -378,10 +383,6 @@
       gen_ref ? webrtc::test::OutputPath() + "neteq_network_stats.dat" : "";
   ResultSink network_stats(stat_out_file);
 
-  std::string rtcp_out_file =
-      gen_ref ? webrtc::test::OutputPath() + "neteq_rtcp_stats.dat" : "";
-  ResultSink rtcp_stats(rtcp_out_file);
-
   packet_ = rtp_source_->NextPacket();
   int i = 0;
   uint64_t last_concealed_samples = 0;
@@ -418,11 +419,6 @@
       EXPECT_NEAR(
           (delta_concealed_samples << 14) / delta_total_samples_received,
           current_network_stats.expand_rate, (2 << 14) / 100.0);
-
-      // Process RTCPstat.
-      RtcpStatistics current_rtcp_stats;
-      neteq_->GetRtcpStatistics(&current_rtcp_stats);
-      ASSERT_NO_FATAL_FAILURE(rtcp_stats.AddResult(current_rtcp_stats));
     }
   }
 
@@ -430,8 +426,6 @@
   output.VerifyChecksum(output_checksum);
   SCOPED_TRACE("Check network stats.");
   network_stats.VerifyChecksum(network_stats_checksum);
-  SCOPED_TRACE("Check rtcp stats.");
-  rtcp_stats.VerifyChecksum(rtcp_stats_checksum);
 }
 
 void NetEqDecodingTest::PopulateRtpInfo(int frame_index,
@@ -481,14 +475,8 @@
                        "4b2370f5c794741d2a46be5c7935c66ef3fb53e9",
                        "4b2370f5c794741d2a46be5c7935c66ef3fb53e9");
 
-  const std::string rtcp_stats_checksum =
-      PlatformChecksum("b8880bf9fed2487efbddcb8d94b9937a29ae521d",
-                       "f3f7b3d3e71d7e635240b5373b57df6a7e4ce9d4", "not used",
-                       "b8880bf9fed2487efbddcb8d94b9937a29ae521d",
-                       "b8880bf9fed2487efbddcb8d94b9937a29ae521d");
-
   DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
-                   rtcp_stats_checksum, FLAG_gen_ref);
+                   FLAG_gen_ref);
 }
 
 #if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
@@ -501,12 +489,13 @@
   const std::string input_rtp_file =
       webrtc::test::ResourcePath("audio_coding/neteq_opus", "rtp");
 
-  const std::string output_checksum =
-      PlatformChecksum("14a63b3c7b925c82296be4bafc71bec85f2915c2",
-                       "b7b7ed802b0e18ee416973bf3b9ae98599b0181d",
-                       "5876e52dda90d5ca433c3726555b907b97c86374",
-                       "14a63b3c7b925c82296be4bafc71bec85f2915c2",
-                       "14a63b3c7b925c82296be4bafc71bec85f2915c2");
+  // Checksum depends on libopus being compiled with or without SSE.
+  const std::string maybe_sse =
+      "14a63b3c7b925c82296be4bafc71bec85f2915c2|"
+      "2c05677daa968d6c68b92adf4affb7cd9bb4d363";
+  const std::string output_checksum = PlatformChecksum(
+      maybe_sse, "b7b7ed802b0e18ee416973bf3b9ae98599b0181d",
+      "5876e52dda90d5ca433c3726555b907b97c86374", maybe_sse, maybe_sse);
 
   const std::string network_stats_checksum =
       PlatformChecksum("adb3272498e436d1c019cbfd71610e9510c54497",
@@ -515,15 +504,8 @@
                        "adb3272498e436d1c019cbfd71610e9510c54497",
                        "adb3272498e436d1c019cbfd71610e9510c54497");
 
-  const std::string rtcp_stats_checksum =
-      PlatformChecksum("e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
-                       "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
-                       "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
-                       "e37c797e3de6a64dda88c9ade7a013d022a2e1e0",
-                       "e37c797e3de6a64dda88c9ade7a013d022a2e1e0");
-
   DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
-                   rtcp_stats_checksum, FLAG_gen_ref);
+                   FLAG_gen_ref);
 }
 
 #if !defined(WEBRTC_IOS) && defined(WEBRTC_NETEQ_UNITTEST_BITEXACT) && \
@@ -536,21 +518,18 @@
   const std::string input_rtp_file =
       webrtc::test::ResourcePath("audio_coding/neteq_opus_dtx", "rtp");
 
-  const std::string output_checksum =
-      PlatformChecksum("713af6c92881f5aab1285765ee6680da9d1c06ce",
-                       "3ec991b96872123f1554c03c543ca5d518431e46",
-                       "da9f9a2d94e0c2d67342fad4965d7b91cda50b25",
-                       "713af6c92881f5aab1285765ee6680da9d1c06ce",
-                       "713af6c92881f5aab1285765ee6680da9d1c06ce");
+  const std::string maybe_sse =
+      "713af6c92881f5aab1285765ee6680da9d1c06ce|"
+      "2ac10c4e79aeedd0df2863b079da5848b40f00b5";
+  const std::string output_checksum = PlatformChecksum(
+      maybe_sse, "3ec991b96872123f1554c03c543ca5d518431e46",
+      "da9f9a2d94e0c2d67342fad4965d7b91cda50b25", maybe_sse, maybe_sse);
 
   const std::string network_stats_checksum =
       "bab58dc587d956f326056d7340c96eb9d2d3cc21";
 
-  const std::string rtcp_stats_checksum =
-      "ac27a7f305efb58b39bf123dccee25dee5758e63";
-
   DecodeAndCompare(input_rtp_file, output_checksum, network_stats_checksum,
-                   rtcp_stats_checksum, FLAG_gen_ref);
+                   FLAG_gen_ref);
 }
 
 // Use fax mode to avoid time-scaling. This is to simplify the testing of
diff --git a/modules/audio_coding/neteq/packet_buffer.cc b/modules/audio_coding/neteq/packet_buffer.cc
index 7b70dee..343763b 100644
--- a/modules/audio_coding/neteq/packet_buffer.cc
+++ b/modules/audio_coding/neteq/packet_buffer.cc
@@ -299,9 +299,4 @@
   return false;
 }
 
-void PacketBuffer::BufferStat(int* num_packets, int* max_num_packets) const {
-  *num_packets = static_cast<int>(buffer_.size());
-  *max_num_packets = static_cast<int>(max_number_of_packets_);
-}
-
 }  // namespace webrtc
diff --git a/modules/audio_coding/neteq/packet_buffer.h b/modules/audio_coding/neteq/packet_buffer.h
index 269b957..0f5cd7f 100644
--- a/modules/audio_coding/neteq/packet_buffer.h
+++ b/modules/audio_coding/neteq/packet_buffer.h
@@ -125,8 +125,6 @@
   virtual bool ContainsDtxOrCngPacket(
       const DecoderDatabase* decoder_database) const;
 
-  virtual void BufferStat(int* num_packets, int* max_num_packets) const;
-
   // Static method returning true if |timestamp| is older than |timestamp_limit|
   // but less than |horizon_samples| behind |timestamp_limit|. For instance,
   // with timestamp_limit = 100 and horizon_samples = 10, a timestamp in the
diff --git a/modules/audio_coding/neteq/rtcp.cc b/modules/audio_coding/neteq/rtcp.cc
deleted file mode 100644
index 6519337..0000000
--- a/modules/audio_coding/neteq/rtcp.cc
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "modules/audio_coding/neteq/rtcp.h"
-
-#include <algorithm>
-#include <cstdlib>
-
-#include "api/rtp_headers.h"
-#include "common_types.h"
-#include "rtc_base/checks.h"
-
-namespace webrtc {
-
-void Rtcp::Init(uint16_t start_sequence_number) {
-  cycles_ = 0;
-  max_seq_no_ = start_sequence_number;
-  base_seq_no_ = start_sequence_number;
-  received_packets_ = 0;
-  received_packets_prior_ = 0;
-  expected_prior_ = 0;
-  jitter_ = 0;
-  transit_ = 0;
-}
-
-void Rtcp::Update(const RTPHeader& rtp_header, uint32_t receive_timestamp) {
-  // Update number of received packets, and largest packet number received.
-  received_packets_++;
-  int16_t sn_diff = rtp_header.sequenceNumber - max_seq_no_;
-  if (sn_diff >= 0) {
-    if (rtp_header.sequenceNumber < max_seq_no_) {
-      // Wrap-around detected.
-      cycles_++;
-    }
-    max_seq_no_ = rtp_header.sequenceNumber;
-  }
-
-  // Calculate jitter according to RFC 3550, and update previous timestamps.
-  // Note that the value in |jitter_| is in Q4.
-  if (received_packets_ > 1) {
-    int32_t ts_diff = receive_timestamp - (rtp_header.timestamp - transit_);
-    int64_t jitter_diff = (std::abs(int64_t{ts_diff}) << 4) - jitter_;
-    // Calculate 15 * jitter_ / 16 + jitter_diff / 16 (with proper rounding).
-    jitter_ = jitter_ + ((jitter_diff + 8) >> 4);
-    RTC_DCHECK_GE(jitter_, 0);
-  }
-  transit_ = rtp_header.timestamp - receive_timestamp;
-}
-
-void Rtcp::GetStatistics(bool no_reset, RtcpStatistics* stats) {
-  // Extended highest sequence number received.
-  stats->extended_highest_sequence_number =
-      (static_cast<int>(cycles_) << 16) + max_seq_no_;
-
-  // Calculate expected number of packets and compare it with the number of
-  // packets that were actually received. The cumulative number of lost packets
-  // can be extracted.
-  uint32_t expected_packets =
-      stats->extended_highest_sequence_number - base_seq_no_ + 1;
-  if (received_packets_ == 0) {
-    // No packets received, assume none lost.
-    stats->packets_lost = 0;
-  } else if (expected_packets > received_packets_) {
-    stats->packets_lost = expected_packets - received_packets_;
-    if (stats->packets_lost > 0xFFFFFF) {
-      stats->packets_lost = 0xFFFFFF;
-    }
-  } else {
-    stats->packets_lost = 0;
-  }
-
-  // Fraction lost since last report.
-  uint32_t expected_since_last = expected_packets - expected_prior_;
-  uint32_t received_since_last = received_packets_ - received_packets_prior_;
-  if (!no_reset) {
-    expected_prior_ = expected_packets;
-    received_packets_prior_ = received_packets_;
-  }
-  int32_t lost = expected_since_last - received_since_last;
-  if (expected_since_last == 0 || lost <= 0 || received_packets_ == 0) {
-    stats->fraction_lost = 0;
-  } else {
-    stats->fraction_lost = std::min(0xFFU, (lost << 8) / expected_since_last);
-  }
-
-  stats->jitter = jitter_ >> 4;  // Scaling from Q4.
-}
-
-}  // namespace webrtc
diff --git a/modules/audio_coding/neteq/rtcp.h b/modules/audio_coding/neteq/rtcp.h
deleted file mode 100644
index 60c2673..0000000
--- a/modules/audio_coding/neteq/rtcp.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
- *
- *  Use of this source code is governed by a BSD-style license
- *  that can be found in the LICENSE file in the root of the source
- *  tree. An additional intellectual property rights grant can be found
- *  in the file PATENTS.  All contributing project authors may
- *  be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef MODULES_AUDIO_CODING_NETEQ_RTCP_H_
-#define MODULES_AUDIO_CODING_NETEQ_RTCP_H_
-
-#include <stdint.h>
-
-#include "rtc_base/constructormagic.h"
-
-namespace webrtc {
-
-struct RtcpStatistics;
-struct RTPHeader;
-
-class Rtcp {
- public:
-  Rtcp() { Init(0); }
-
-  ~Rtcp() {}
-
-  // Resets the RTCP statistics, and sets the first received sequence number.
-  void Init(uint16_t start_sequence_number);
-
-  // Updates the RTCP statistics with a new received packet.
-  void Update(const RTPHeader& rtp_header, uint32_t receive_timestamp);
-
-  // Returns the current RTCP statistics. If |no_reset| is true, the statistics
-  // are not reset, otherwise they are.
-  void GetStatistics(bool no_reset, RtcpStatistics* stats);
-
- private:
-  uint16_t cycles_;       // The number of wrap-arounds for the sequence number.
-  uint16_t max_seq_no_;   // The maximum sequence number received. Starts over
-                          // from 0 after wrap-around.
-  uint16_t base_seq_no_;  // The sequence number of the first received packet.
-  uint32_t received_packets_;  // The number of packets that have been received.
-  uint32_t received_packets_prior_;  // Number of packets received when last
-                                     // report was generated.
-  uint32_t expected_prior_;  // Expected number of packets, at the time of the
-                             // last report.
-  int64_t jitter_;           // Current jitter value in Q4.
-  int32_t transit_;          // Clock difference for previous packet.
-
-  RTC_DISALLOW_COPY_AND_ASSIGN(Rtcp);
-};
-
-}  // namespace webrtc
-#endif  // MODULES_AUDIO_CODING_NETEQ_RTCP_H_
diff --git a/modules/audio_coding/neteq/statistics_calculator.cc b/modules/audio_coding/neteq/statistics_calculator.cc
index 807d7ee..50521fb 100644
--- a/modules/audio_coding/neteq/statistics_calculator.cc
+++ b/modules/audio_coding/neteq/statistics_calculator.cc
@@ -257,11 +257,14 @@
   buffer_full_counter_.RegisterSample();
 }
 
-void StatisticsCalculator::LogDelayedPacketOutageEvent(int outage_duration_ms) {
+void StatisticsCalculator::LogDelayedPacketOutageEvent(int num_samples,
+                                                       int fs_hz) {
+  int outage_duration_ms = num_samples / (fs_hz / 1000);
   RTC_HISTOGRAM_COUNTS("WebRTC.Audio.DelayedPacketOutageEventMs",
                        outage_duration_ms, 1 /* min */, 2000 /* max */,
                        100 /* bucket count */);
   delayed_packet_outage_counter_.RegisterSample();
+  lifetime_stats_.delayed_packet_outage_samples += num_samples;
 }
 
 void StatisticsCalculator::StoreWaitingTime(int waiting_time_ms) {
diff --git a/modules/audio_coding/neteq/statistics_calculator.h b/modules/audio_coding/neteq/statistics_calculator.h
index 6a5f7f4..49b74a0 100644
--- a/modules/audio_coding/neteq/statistics_calculator.h
+++ b/modules/audio_coding/neteq/statistics_calculator.h
@@ -86,10 +86,10 @@
   // Report that the packet buffer was flushed.
   void FlushedPacketBuffer();
 
-  // Logs a delayed packet outage event of |outage_duration_ms|. A delayed
-  // packet outage event is defined as an expand period caused not by an actual
-  // packet loss, but by a delayed packet.
-  virtual void LogDelayedPacketOutageEvent(int outage_duration_ms);
+  // Logs a delayed packet outage event of |num_samples| expanded at a sample
+  // rate of |fs_hz|. A delayed packet outage event is defined as an expand
+  // period caused not by an actual packet loss, but by a delayed packet.
+  virtual void LogDelayedPacketOutageEvent(int num_samples, int fs_hz);
 
   // Returns the current network statistics in |stats|. The current sample rate
   // is |fs_hz|, the total number of samples in packet buffer and sync buffer
diff --git a/modules/audio_processing/BUILD.gn b/modules/audio_processing/BUILD.gn
index a244c84..66b07df 100644
--- a/modules/audio_processing/BUILD.gn
+++ b/modules/audio_processing/BUILD.gn
@@ -329,6 +329,7 @@
   deps = [
     "../../api:array_view",
     "../../common_audio:common_audio",
+    "../../rtc_base:checks",
     "../../rtc_base:rtc_base_approved",
     "../../rtc_base:stringutils",
   ]
@@ -413,6 +414,7 @@
       "../../rtc_base:protobuf_utils",
       "../../rtc_base:rtc_base",
       "../../rtc_base:rtc_base_approved",
+      "../../rtc_base:rtc_base_tests_utils",
       "../../rtc_base:safe_minmax",
       "../../rtc_base/system:arch",
       "../../rtc_base/system:file_wrapper",
@@ -429,6 +431,7 @@
       "agc2:biquad_filter_unittests",
       "agc2:fixed_digital_unittests",
       "agc2:noise_estimator_unittests",
+      "agc2:rnn_vad_with_level_unittests",
       "agc2:test_utils",
       "agc2/rnn_vad:unittests",
       "test/conversational_speech:unittest",
@@ -457,7 +460,6 @@
         ":audioproc_unittest_proto",
         ":runtime_settings_protobuf_utils",
         "../../api/audio:audio_frame_api",
-        "../../rtc_base:rtc_base_tests_utils",
         "../../rtc_base:rtc_task_queue",
         "aec_dump",
         "aec_dump:aec_dump_unittests",
@@ -588,6 +590,7 @@
         "aec_dump:aec_dump_impl",
         "//testing/gtest",
         "//third_party/abseil-cpp/absl/memory",
+        "//third_party/abseil-cpp/absl/strings",
         "//third_party/abseil-cpp/absl/types:optional",
       ]
     }  # audioproc_f_impl
diff --git a/modules/audio_processing/aec3/BUILD.gn b/modules/audio_processing/aec3/BUILD.gn
index c3f6dd5..189bcfd 100644
--- a/modules/audio_processing/aec3/BUILD.gn
+++ b/modules/audio_processing/aec3/BUILD.gn
@@ -20,6 +20,8 @@
     "aec3_fft.h",
     "aec_state.cc",
     "aec_state.h",
+    "api_call_jitter_metrics.cc",
+    "api_call_jitter_metrics.h",
     "block_delay_buffer.cc",
     "block_delay_buffer.h",
     "block_framer.cc",
@@ -31,6 +33,8 @@
     "block_processor_metrics.h",
     "cascaded_biquad_filter.cc",
     "cascaded_biquad_filter.h",
+    "clockdrift_detector.cc",
+    "clockdrift_detector.h",
     "comfort_noise_generator.cc",
     "comfort_noise_generator.h",
     "decimator.cc",
@@ -101,6 +105,8 @@
     "reverb_model_fallback.h",
     "shadow_filter_update_gain.cc",
     "shadow_filter_update_gain.h",
+    "signal_dependent_erle_estimator.cc",
+    "signal_dependent_erle_estimator.h",
     "skew_estimator.cc",
     "skew_estimator.h",
     "stationarity_estimator.cc",
@@ -188,11 +194,13 @@
         "adaptive_fir_filter_unittest.cc",
         "aec3_fft_unittest.cc",
         "aec_state_unittest.cc",
+        "api_call_jitter_metrics_unittest.cc",
         "block_delay_buffer_unittest.cc",
         "block_framer_unittest.cc",
         "block_processor_metrics_unittest.cc",
         "block_processor_unittest.cc",
         "cascaded_biquad_filter_unittest.cc",
+        "clockdrift_detector_unittest.cc",
         "comfort_noise_generator_unittest.cc",
         "decimator_unittest.cc",
         "echo_canceller3_unittest.cc",
@@ -216,6 +224,7 @@
         "residual_echo_estimator_unittest.cc",
         "reverb_model_estimator_unittest.cc",
         "shadow_filter_update_gain_unittest.cc",
+        "signal_dependent_erle_estimator_unittest.cc",
         "skew_estimator_unittest.cc",
         "subtractor_unittest.cc",
         "suppression_filter_unittest.cc",
diff --git a/modules/audio_processing/aec3/aec_state.cc b/modules/audio_processing/aec3/aec_state.cc
index 0eeb7eb..45b361f 100644
--- a/modules/audio_processing/aec3/aec_state.cc
+++ b/modules/audio_processing/aec3/aec_state.cc
@@ -91,10 +91,7 @@
       legacy_filter_quality_state_(config_),
       legacy_saturation_detector_(config_),
       erl_estimator_(2 * kNumBlocksPerSecond),
-      erle_estimator_(2 * kNumBlocksPerSecond,
-                      config_.erle.min,
-                      config_.erle.max_l,
-                      config_.erle.max_h),
+      erle_estimator_(2 * kNumBlocksPerSecond, config_),
       suppression_gain_limiter_(config_),
       filter_analyzer_(config_),
       echo_audibility_(
@@ -154,8 +151,7 @@
   subtractor_output_analyzer_.Update(subtractor_output);
 
   // Analyze the properties of the filter.
-  filter_analyzer_.Update(adaptive_filter_impulse_response,
-                          adaptive_filter_frequency_response, render_buffer);
+  filter_analyzer_.Update(adaptive_filter_impulse_response, render_buffer);
 
   // Estimate the direct path delay of the filter.
   delay_state_.Update(filter_analyzer_, external_delay,
@@ -210,7 +206,8 @@
   const auto& X2_input_erle =
       enable_erle_updates_during_reverb_ ? X2_reverb : X2;
 
-  erle_estimator_.Update(X2_input_erle, Y2, E2_main,
+  erle_estimator_.Update(render_buffer, adaptive_filter_frequency_response,
+                         X2_input_erle, Y2, E2_main,
                          subtractor_output_analyzer_.ConvergedFilter(),
                          config_.erle.onset_detection);
 
diff --git a/modules/audio_processing/aec3/api_call_jitter_metrics.cc b/modules/audio_processing/aec3/api_call_jitter_metrics.cc
new file mode 100644
index 0000000..45f56a5
--- /dev/null
+++ b/modules/audio_processing/aec3/api_call_jitter_metrics.cc
@@ -0,0 +1,121 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/api_call_jitter_metrics.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "system_wrappers/include/metrics.h"
+
+namespace webrtc {
+namespace {
+
+bool TimeToReportMetrics(int frames_since_last_report) {
+  constexpr int kNumFramesPerSecond = 100;
+  constexpr int kReportingIntervalFrames = 10 * kNumFramesPerSecond;
+  return frames_since_last_report == kReportingIntervalFrames;
+}
+
+}  // namespace
+
+ApiCallJitterMetrics::Jitter::Jitter()
+    : max_(0), min_(std::numeric_limits<int>::max()) {}
+
+void ApiCallJitterMetrics::Jitter::Update(int num_api_calls_in_a_row) {
+  min_ = std::min(min_, num_api_calls_in_a_row);
+  max_ = std::max(max_, num_api_calls_in_a_row);
+}
+
+void ApiCallJitterMetrics::Jitter::Reset() {
+  min_ = std::numeric_limits<int>::max();
+  max_ = 0;
+}
+
+void ApiCallJitterMetrics::Reset() {
+  render_jitter_.Reset();
+  capture_jitter_.Reset();
+  num_api_calls_in_a_row_ = 0;
+  frames_since_last_report_ = 0;
+  last_call_was_render_ = false;
+  proper_call_observed_ = false;
+}
+
+void ApiCallJitterMetrics::ReportRenderCall() {
+  if (!last_call_was_render_) {
+    // If the previous call was a capture call and a proper call has been
+    // observed (containing both render and capture data), store the last
+    // number of capture calls in the metrics.
+    if (proper_call_observed_) {
+      capture_jitter_.Update(num_api_calls_in_a_row_);
+    }
+
+    // Reset the call counter to start counting render calls.
+    num_api_calls_in_a_row_ = 0;
+  }
+  ++num_api_calls_in_a_row_;
+  last_call_was_render_ = true;
+}
+
+void ApiCallJitterMetrics::ReportCaptureCall() {
+  if (last_call_was_render_) {
+    // If the previous call was a render call and a proper call has been
+    // observed (containing both render and capture data), store the last
+    // number of render calls in the metrics.
+    if (proper_call_observed_) {
+      render_jitter_.Update(num_api_calls_in_a_row_);
+    }
+    // Reset the call counter to start counting capture calls.
+    num_api_calls_in_a_row_ = 0;
+
+    // If this statement is reached, at least one render and one capture call
+    // have been observed.
+    proper_call_observed_ = true;
+  }
+  ++num_api_calls_in_a_row_;
+  last_call_was_render_ = false;
+
+  // Only report and update jitter metrics when a proper call, containing
+  // both render and capture data, has been observed.
+  if (proper_call_observed_ &&
+      TimeToReportMetrics(++frames_since_last_report_)) {
+    // Report jitter, where the basic unit is frames.
+    constexpr int kMaxJitterToReport = 50;
+
+    // Report max and min jitter for render and capture, in number of frames.
+    RTC_HISTOGRAM_COUNTS_LINEAR(
+        "WebRTC.Audio.EchoCanceller.MaxRenderJitter",
+        std::min(kMaxJitterToReport, render_jitter().max()), 1,
+        kMaxJitterToReport, kMaxJitterToReport);
+    RTC_HISTOGRAM_COUNTS_LINEAR(
+        "WebRTC.Audio.EchoCanceller.MinRenderJitter",
+        std::min(kMaxJitterToReport, render_jitter().min()), 1,
+        kMaxJitterToReport, kMaxJitterToReport);
+
+    RTC_HISTOGRAM_COUNTS_LINEAR(
+        "WebRTC.Audio.EchoCanceller.MaxCaptureJitter",
+        std::min(kMaxJitterToReport, capture_jitter().max()), 1,
+        kMaxJitterToReport, kMaxJitterToReport);
+    RTC_HISTOGRAM_COUNTS_LINEAR(
+        "WebRTC.Audio.EchoCanceller.MinCaptureJitter",
+        std::min(kMaxJitterToReport, capture_jitter().min()), 1,
+        kMaxJitterToReport, kMaxJitterToReport);
+
+    frames_since_last_report_ = 0;
+    Reset();
+  }
+}
+
+bool ApiCallJitterMetrics::WillReportMetricsAtNextCapture() const {
+  return TimeToReportMetrics(frames_since_last_report_ + 1);
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/api_call_jitter_metrics.h b/modules/audio_processing/aec3/api_call_jitter_metrics.h
new file mode 100644
index 0000000..dd1fa82
--- /dev/null
+++ b/modules/audio_processing/aec3/api_call_jitter_metrics.h
@@ -0,0 +1,60 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_API_CALL_JITTER_METRICS_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_API_CALL_JITTER_METRICS_H_
+
+namespace webrtc {
+
+// Stores data for reporting metrics on the API call jitter.
+class ApiCallJitterMetrics {
+ public:
+  class Jitter {
+   public:
+    Jitter();
+    void Update(int num_api_calls_in_a_row);
+    void Reset();
+
+    int min() const { return min_; }
+    int max() const { return max_; }
+
+   private:
+    int max_;
+    int min_;
+  };
+
+  ApiCallJitterMetrics() { Reset(); }
+
+  // Update metrics for render API call.
+  void ReportRenderCall();
+
+  // Update and periodically report metrics for capture API call.
+  void ReportCaptureCall();
+
+  // Methods used only for testing.
+  const Jitter& render_jitter() const { return render_jitter_; }
+  const Jitter& capture_jitter() const { return capture_jitter_; }
+  bool WillReportMetricsAtNextCapture() const;
+
+ private:
+  void Reset();
+
+  Jitter render_jitter_;
+  Jitter capture_jitter_;
+
+  int num_api_calls_in_a_row_ = 0;
+  int frames_since_last_report_ = 0;
+  bool last_call_was_render_ = false;
+  bool proper_call_observed_ = false;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_API_CALL_JITTER_METRICS_H_
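A minimal usage sketch for the class above; the driver loop and function name are illustrative, while the calls mirror how EchoCanceller3 reports render and capture API calls later in this change:

    #include "modules/audio_processing/aec3/api_call_jitter_metrics.h"

    // Illustrative driver: one render call followed by one capture call per
    // frame. Histogram reporting happens from ReportCaptureCall() once a
    // proper render/capture sequence has been seen and the reporting
    // interval has elapsed.
    void DriveJitterMetrics() {
      webrtc::ApiCallJitterMetrics metrics;
      for (int frame = 0; frame < 100; ++frame) {
        metrics.ReportRenderCall();
        metrics.ReportCaptureCall();
        // With perfectly interleaved calls both min() and max() settle at 1.
      }
    }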
diff --git a/modules/audio_processing/aec3/api_call_jitter_metrics_unittest.cc b/modules/audio_processing/aec3/api_call_jitter_metrics_unittest.cc
new file mode 100644
index 0000000..86608aa
--- /dev/null
+++ b/modules/audio_processing/aec3/api_call_jitter_metrics_unittest.cc
@@ -0,0 +1,109 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/api_call_jitter_metrics.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+
+// Verify constant jitter.
+TEST(ApiCallJitterMetrics, ConstantJitter) {
+  for (int jitter = 1; jitter < 20; ++jitter) {
+    ApiCallJitterMetrics metrics;
+    for (size_t k = 0; k < 30 * kNumBlocksPerSecond; ++k) {
+      for (int j = 0; j < jitter; ++j) {
+        metrics.ReportRenderCall();
+      }
+
+      for (int j = 0; j < jitter; ++j) {
+        metrics.ReportCaptureCall();
+
+        if (metrics.WillReportMetricsAtNextCapture()) {
+          EXPECT_EQ(jitter, metrics.render_jitter().min());
+          EXPECT_EQ(jitter, metrics.render_jitter().max());
+          EXPECT_EQ(jitter, metrics.capture_jitter().min());
+          EXPECT_EQ(jitter, metrics.capture_jitter().max());
+        }
+      }
+    }
+  }
+}
+
+// Verify peaky jitter for the render.
+TEST(ApiCallJitterMetrics, JitterPeakRender) {
+  constexpr int kMinJitter = 2;
+  constexpr int kJitterPeak = 10;
+  constexpr int kPeakInterval = 100;
+
+  ApiCallJitterMetrics metrics;
+  int render_surplus = 0;
+
+  for (size_t k = 0; k < 30 * kNumBlocksPerSecond; ++k) {
+    const int num_render_calls =
+        k % kPeakInterval == 0 ? kJitterPeak : kMinJitter;
+    for (int j = 0; j < num_render_calls; ++j) {
+      metrics.ReportRenderCall();
+      ++render_surplus;
+    }
+
+    ASSERT_LE(kMinJitter, render_surplus);
+    const int num_capture_calls =
+        render_surplus == kMinJitter ? kMinJitter : kMinJitter + 1;
+    for (int j = 0; j < num_capture_calls; ++j) {
+      metrics.ReportCaptureCall();
+
+      if (metrics.WillReportMetricsAtNextCapture()) {
+        EXPECT_EQ(kMinJitter, metrics.render_jitter().min());
+        EXPECT_EQ(kJitterPeak, metrics.render_jitter().max());
+        EXPECT_EQ(kMinJitter, metrics.capture_jitter().min());
+        EXPECT_EQ(kMinJitter + 1, metrics.capture_jitter().max());
+      }
+      --render_surplus;
+    }
+  }
+}
+
+// Verify peaky jitter for the capture.
+TEST(ApiCallJitterMetrics, JitterPeakCapture) {
+  constexpr int kMinJitter = 2;
+  constexpr int kJitterPeak = 10;
+  constexpr int kPeakInterval = 100;
+
+  ApiCallJitterMetrics metrics;
+  int capture_surplus = kMinJitter;
+
+  for (size_t k = 0; k < 30 * kNumBlocksPerSecond; ++k) {
+    ASSERT_LE(kMinJitter, capture_surplus);
+    const int num_render_calls =
+        capture_surplus == kMinJitter ? kMinJitter : kMinJitter + 1;
+    for (int j = 0; j < num_render_calls; ++j) {
+      metrics.ReportRenderCall();
+      --capture_surplus;
+    }
+
+    const int num_capture_calls =
+        k % kPeakInterval == 0 ? kJitterPeak : kMinJitter;
+    for (int j = 0; j < num_capture_calls; ++j) {
+      metrics.ReportCaptureCall();
+
+      if (metrics.WillReportMetricsAtNextCapture()) {
+        EXPECT_EQ(kMinJitter, metrics.render_jitter().min());
+        EXPECT_EQ(kMinJitter + 1, metrics.render_jitter().max());
+        EXPECT_EQ(kMinJitter, metrics.capture_jitter().min());
+        EXPECT_EQ(kJitterPeak, metrics.capture_jitter().max());
+      }
+      ++capture_surplus;
+    }
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/block_processor.cc b/modules/audio_processing/aec3/block_processor.cc
index 590380f..ef25e7c 100644
--- a/modules/audio_processing/aec3/block_processor.cc
+++ b/modules/audio_processing/aec3/block_processor.cc
@@ -194,6 +194,8 @@
     }
   }
 
+  echo_path_variability.clock_drift = delay_controller_->HasClockdrift();
+
   // Remove the echo from the capture signal.
   echo_remover_->ProcessCapture(
       echo_path_variability, capture_signal_saturation, estimated_delay_,
diff --git a/modules/audio_processing/aec3/block_processor2.cc b/modules/audio_processing/aec3/block_processor2.cc
index 3616427..30bd3ee 100644
--- a/modules/audio_processing/aec3/block_processor2.cc
+++ b/modules/audio_processing/aec3/block_processor2.cc
@@ -166,6 +166,8 @@
     }
   }
 
+  echo_path_variability.clock_drift = delay_controller_->HasClockdrift();
+
   // Remove the echo from the capture signal.
   echo_remover_->ProcessCapture(
       echo_path_variability, capture_signal_saturation, estimated_delay_,
diff --git a/modules/audio_processing/aec3/clockdrift_detector.cc b/modules/audio_processing/aec3/clockdrift_detector.cc
new file mode 100644
index 0000000..2c49b79
--- /dev/null
+++ b/modules/audio_processing/aec3/clockdrift_detector.cc
@@ -0,0 +1,61 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#include "modules/audio_processing/aec3/clockdrift_detector.h"
+
+namespace webrtc {
+
+ClockdriftDetector::ClockdriftDetector()
+    : level_(Level::kNone), stability_counter_(0) {
+  delay_history_.fill(0);
+}
+
+ClockdriftDetector::~ClockdriftDetector() = default;
+
+void ClockdriftDetector::Update(int delay_estimate) {
+  if (delay_estimate == delay_history_[0]) {
+    // Reset clockdrift level if delay estimate is stable for 7500 blocks (30
+    // seconds).
+    if (++stability_counter_ > 7500)
+      level_ = Level::kNone;
+    return;
+  }
+
+  stability_counter_ = 0;
+  const int d1 = delay_history_[0] - delay_estimate;
+  const int d2 = delay_history_[1] - delay_estimate;
+  const int d3 = delay_history_[2] - delay_estimate;
+
+  // Patterns recognized as positive clockdrift:
+  // [x-3], x-2, x-1, x.
+  // [x-3], x-1, x-2, x.
+  const bool probable_drift_up =
+      (d1 == -1 && d2 == -2) || (d1 == -2 && d2 == -1);
+  const bool drift_up = probable_drift_up && d3 == -3;
+
+  // Patterns recognized as negative clockdrift:
+  // [x+3], x+2, x+1, x.
+  // [x+3], x+1, x+2, x.
+  const bool probable_drift_down = (d1 == 1 && d2 == 2) || (d1 == 2 && d2 == 1);
+  const bool drift_down = probable_drift_down && d3 == 3;
+
+  // Set clockdrift level.
+  if (drift_up || drift_down) {
+    level_ = Level::kVerified;
+  } else if ((probable_drift_up || probable_drift_down) &&
+             level_ == Level::kNone) {
+    level_ = Level::kProbable;
+  }
+
+  // Shift delay history one step.
+  delay_history_[2] = delay_history_[1];
+  delay_history_[1] = delay_history_[0];
+  delay_history_[0] = delay_estimate;
+}
+}  // namespace webrtc
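A minimal sketch of how the pattern matching above escalates the level when the delay estimate grows by one block per change (delay values are illustrative):

    #include "modules/audio_processing/aec3/clockdrift_detector.h"

    void IllustrateClockdriftEscalation() {
      webrtc::ClockdriftDetector detector;
      detector.Update(10);  // History becomes [10, 0, 0]; no pattern yet.
      detector.Update(11);  // Only d1 == -1            -> still Level::kNone.
      detector.Update(12);  // d1 == -1, d2 == -2       -> Level::kProbable.
      detector.Update(13);  // d1, d2, d3 == -1, -2, -3 -> Level::kVerified.
      // 7500 consecutive updates with an unchanged estimate reset to kNone.
    }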
diff --git a/modules/audio_processing/aec3/clockdrift_detector.h b/modules/audio_processing/aec3/clockdrift_detector.h
new file mode 100644
index 0000000..22528c9
--- /dev/null
+++ b/modules/audio_processing/aec3/clockdrift_detector.h
@@ -0,0 +1,38 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_CLOCKDRIFT_DETECTOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_CLOCKDRIFT_DETECTOR_H_
+
+#include <array>
+
+namespace webrtc {
+
+class ApmDataDumper;
+struct DownsampledRenderBuffer;
+struct EchoCanceller3Config;
+
+// Detects clockdrift by analyzing the estimated delay.
+class ClockdriftDetector {
+ public:
+  enum class Level { kNone, kProbable, kVerified, kNumCategories };
+  ClockdriftDetector();
+  ~ClockdriftDetector();
+  void Update(int delay_estimate);
+  Level ClockdriftLevel() const { return level_; }
+
+ private:
+  std::array<int, 3> delay_history_;
+  Level level_;
+  size_t stability_counter_;
+};
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_CLOCKDRIFT_DETECTOR_H_
diff --git a/modules/audio_processing/aec3/clockdrift_detector_unittest.cc b/modules/audio_processing/aec3/clockdrift_detector_unittest.cc
new file mode 100644
index 0000000..0f98b01
--- /dev/null
+++ b/modules/audio_processing/aec3/clockdrift_detector_unittest.cc
@@ -0,0 +1,57 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/clockdrift_detector.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+TEST(ClockdriftDetector, ClockdriftDetector) {
+  ClockdriftDetector c;
+  // No clockdrift at start.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+
+  // Monotonically increasing delay.
+  for (int i = 0; i < 100; i++)
+    c.Update(1000);
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+  for (int i = 0; i < 100; i++)
+    c.Update(1001);
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+  for (int i = 0; i < 100; i++)
+    c.Update(1002);
+  // Probable clockdrift.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kProbable);
+  for (int i = 0; i < 100; i++)
+    c.Update(1003);
+  // Verified clockdrift.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kVerified);
+
+  // Stable delay.
+  for (int i = 0; i < 10000; i++)
+    c.Update(1003);
+  // No clockdrift.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kNone);
+
+  // Decreasing delay.
+  for (int i = 0; i < 100; i++)
+    c.Update(1001);
+  for (int i = 0; i < 100; i++)
+    c.Update(999);
+  // Probable clockdrift.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kProbable);
+  for (int i = 0; i < 100; i++)
+    c.Update(1000);
+  for (int i = 0; i < 100; i++)
+    c.Update(998);
+  // Verified clockdrift.
+  EXPECT_TRUE(c.ClockdriftLevel() == ClockdriftDetector::Level::kVerified);
+}
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/echo_canceller3.cc b/modules/audio_processing/aec3/echo_canceller3.cc
index 5debcda..f05edb1 100644
--- a/modules/audio_processing/aec3/echo_canceller3.cc
+++ b/modules/audio_processing/aec3/echo_canceller3.cc
@@ -68,14 +68,12 @@
   return field_trial::IsEnabled("WebRTC-Aec3UseLegacyNormalSuppressorTuning");
 }
 
-bool DeactivateStationarityProperties() {
-  return field_trial::IsEnabled(
-      "WebRTC-Aec3UseStationarityPropertiesKillSwitch");
+bool ActivateStationarityProperties() {
+  return field_trial::IsEnabled("WebRTC-Aec3UseStationarityProperties");
 }
 
-bool DeactivateStationarityPropertiesAtInit() {
-  return field_trial::IsEnabled(
-      "WebRTC-Aec3UseStationarityPropertiesAtInitKillSwitch");
+bool ActivateStationarityPropertiesAtInit() {
+  return field_trial::IsEnabled("WebRTC-Aec3UseStationarityPropertiesAtInit");
 }
 
 bool EnableNewRenderBuffering() {
@@ -154,15 +152,12 @@
     adjusted_cfg.suppressor.dominant_nearend_detection.hold_duration = 25;
   }
 
-  // TODO(peah): Clean this up once upstream dependencies that forces this to
-  // zero are resolved.
-  adjusted_cfg.echo_audibility.use_stationary_properties = true;
-  if (DeactivateStationarityProperties()) {
-    adjusted_cfg.echo_audibility.use_stationary_properties = false;
+  if (ActivateStationarityProperties()) {
+    adjusted_cfg.echo_audibility.use_stationary_properties = true;
   }
 
-  if (DeactivateStationarityPropertiesAtInit()) {
-    adjusted_cfg.echo_audibility.use_stationarity_properties_at_init = false;
+  if (ActivateStationarityPropertiesAtInit()) {
+    adjusted_cfg.echo_audibility.use_stationarity_properties_at_init = true;
   }
 
   if (!UseEarlyDelayDetection()) {
@@ -451,6 +446,10 @@
   data_dumper_->DumpRaw("aec3_call_order",
                         static_cast<int>(EchoCanceller3ApiCall::kCapture));
 
+  // Report capture call in the metrics and periodically update API call
+  // metrics.
+  api_call_metrics_.ReportCaptureCall();
+
   // Optionally delay the capture signal.
   if (config_.delay.fixed_capture_delay_samples > 0) {
     block_delay_buffer_.DelaySignal(capture);
@@ -505,6 +504,9 @@
   bool frame_to_buffer =
       render_transfer_queue_.Remove(&render_queue_output_frame_);
   while (frame_to_buffer) {
+    // Report render call in the metrics.
+    api_call_metrics_.ReportRenderCall();
+
     BufferRenderFrameContent(&render_queue_output_frame_, 0, &render_blocker_,
                              block_processor_.get(), &block_, &sub_frame_view_);
 
diff --git a/modules/audio_processing/aec3/echo_canceller3.h b/modules/audio_processing/aec3/echo_canceller3.h
index 0d07702..671d271 100644
--- a/modules/audio_processing/aec3/echo_canceller3.h
+++ b/modules/audio_processing/aec3/echo_canceller3.h
@@ -18,6 +18,7 @@
 #include "api/array_view.h"
 #include "api/audio/echo_canceller3_config.h"
 #include "api/audio/echo_control.h"
+#include "modules/audio_processing/aec3/api_call_jitter_metrics.h"
 #include "modules/audio_processing/aec3/block_delay_buffer.h"
 #include "modules/audio_processing/aec3/block_framer.h"
 #include "modules/audio_processing/aec3/block_processor.h"
@@ -140,6 +141,7 @@
   std::vector<rtc::ArrayView<float>> sub_frame_view_
       RTC_GUARDED_BY(capture_race_checker_);
   BlockDelayBuffer block_delay_buffer_ RTC_GUARDED_BY(capture_race_checker_);
+  ApiCallJitterMetrics api_call_metrics_ RTC_GUARDED_BY(capture_race_checker_);
 
   RTC_DISALLOW_IMPLICIT_CONSTRUCTORS(EchoCanceller3);
 };
diff --git a/modules/audio_processing/aec3/echo_path_delay_estimator.cc b/modules/audio_processing/aec3/echo_path_delay_estimator.cc
index 5c838ae..6069ed6 100644
--- a/modules/audio_processing/aec3/echo_path_delay_estimator.cc
+++ b/modules/audio_processing/aec3/echo_path_delay_estimator.cc
@@ -73,6 +73,12 @@
       matched_filter_lag_aggregator_.Aggregate(
           matched_filter_.GetLagEstimates());
 
+  // Run clockdrift detection.
+  if (aggregated_matched_filter_lag &&
+      (*aggregated_matched_filter_lag).quality ==
+          DelayEstimate::Quality::kRefined)
+    clockdrift_detector_.Update((*aggregated_matched_filter_lag).delay);
+
   // TODO(peah): Move this logging outside of this class once EchoCanceller3
   // development is done.
   data_dumper_->DumpRaw(
@@ -112,5 +118,4 @@
   old_aggregated_lag_ = absl::nullopt;
   consistent_estimate_counter_ = 0;
 }
-
 }  // namespace webrtc
diff --git a/modules/audio_processing/aec3/echo_path_delay_estimator.h b/modules/audio_processing/aec3/echo_path_delay_estimator.h
index 060c875..1f14735 100644
--- a/modules/audio_processing/aec3/echo_path_delay_estimator.h
+++ b/modules/audio_processing/aec3/echo_path_delay_estimator.h
@@ -15,6 +15,7 @@
 
 #include "absl/types/optional.h"
 #include "api/array_view.h"
+#include "modules/audio_processing/aec3/clockdrift_detector.h"
 #include "modules/audio_processing/aec3/decimator.h"
 #include "modules/audio_processing/aec3/delay_estimate.h"
 #include "modules/audio_processing/aec3/matched_filter.h"
@@ -49,6 +50,11 @@
                                         down_sampling_factor_);
   }
 
+  // Returns the level of detected clockdrift.
+  ClockdriftDetector::Level Clockdrift() const {
+    return clockdrift_detector_.ClockdriftLevel();
+  }
+
  private:
   ApmDataDumper* const data_dumper_;
   const size_t down_sampling_factor_;
@@ -58,6 +64,7 @@
   MatchedFilterLagAggregator matched_filter_lag_aggregator_;
   absl::optional<DelayEstimate> old_aggregated_lag_;
   size_t consistent_estimate_counter_ = 0;
+  ClockdriftDetector clockdrift_detector_;
 
   // Internal reset method with more granularity.
   void Reset(bool reset_lag_aggregator, bool reset_delay_confidence);
diff --git a/modules/audio_processing/aec3/erle_estimator.cc b/modules/audio_processing/aec3/erle_estimator.cc
index 539a59b..656a9c7 100644
--- a/modules/audio_processing/aec3/erle_estimator.cc
+++ b/modules/audio_processing/aec3/erle_estimator.cc
@@ -10,20 +10,18 @@
 
 #include "modules/audio_processing/aec3/erle_estimator.h"
 
-#include "api/array_view.h"
 #include "modules/audio_processing/aec3/aec3_common.h"
-#include "modules/audio_processing/logging/apm_data_dumper.h"
 #include "rtc_base/checks.h"
 
 namespace webrtc {
 
 ErleEstimator::ErleEstimator(size_t startup_phase_length_blocks_,
-                             float min_erle,
-                             float max_erle_lf,
-                             float max_erle_hf)
+                             const EchoCanceller3Config& config)
     : startup_phase_length_blocks__(startup_phase_length_blocks_),
-      fullband_erle_estimator_(min_erle, max_erle_lf),
-      subband_erle_estimator_(min_erle, max_erle_lf, max_erle_hf) {
+      use_signal_dependent_erle_(config.erle.num_sections > 1),
+      fullband_erle_estimator_(config.erle.min, config.erle.max_l),
+      subband_erle_estimator_(config),
+      signal_dependent_erle_estimator_(config) {
   Reset(true);
 }
 
@@ -32,16 +30,21 @@
 void ErleEstimator::Reset(bool delay_change) {
   fullband_erle_estimator_.Reset();
   subband_erle_estimator_.Reset();
+  signal_dependent_erle_estimator_.Reset();
   if (delay_change) {
     blocks_since_reset_ = 0;
   }
 }
 
-void ErleEstimator::Update(rtc::ArrayView<const float> reverb_render_spectrum,
-                           rtc::ArrayView<const float> capture_spectrum,
-                           rtc::ArrayView<const float> subtractor_spectrum,
-                           bool converged_filter,
-                           bool onset_detection) {
+void ErleEstimator::Update(
+    const RenderBuffer& render_buffer,
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+        filter_frequency_response,
+    rtc::ArrayView<const float> reverb_render_spectrum,
+    rtc::ArrayView<const float> capture_spectrum,
+    rtc::ArrayView<const float> subtractor_spectrum,
+    bool converged_filter,
+    bool onset_detection) {
   RTC_DCHECK_EQ(kFftLengthBy2Plus1, reverb_render_spectrum.size());
   RTC_DCHECK_EQ(kFftLengthBy2Plus1, capture_spectrum.size());
   RTC_DCHECK_EQ(kFftLengthBy2Plus1, subtractor_spectrum.size());
@@ -55,6 +58,13 @@
 
   subband_erle_estimator_.Update(X2_reverb, Y2, E2, converged_filter,
                                  onset_detection);
+
+  if (use_signal_dependent_erle_) {
+    signal_dependent_erle_estimator_.Update(
+        render_buffer, filter_frequency_response, X2_reverb, Y2, E2,
+        subband_erle_estimator_.Erle(), converged_filter);
+  }
+
   fullband_erle_estimator_.Update(X2_reverb, Y2, E2, converged_filter);
 }
 
@@ -62,6 +72,7 @@
     const std::unique_ptr<ApmDataDumper>& data_dumper) const {
   fullband_erle_estimator_.Dump(data_dumper);
   subband_erle_estimator_.Dump(data_dumper);
+  signal_dependent_erle_estimator_.Dump(data_dumper);
 }
 
 }  // namespace webrtc
diff --git a/modules/audio_processing/aec3/erle_estimator.h b/modules/audio_processing/aec3/erle_estimator.h
index 2d2c3ae..8036c21 100644
--- a/modules/audio_processing/aec3/erle_estimator.h
+++ b/modules/audio_processing/aec3/erle_estimator.h
@@ -17,8 +17,11 @@
 
 #include "absl/types/optional.h"
 #include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
 #include "modules/audio_processing/aec3/aec3_common.h"
 #include "modules/audio_processing/aec3/fullband_erle_estimator.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/signal_dependent_erle_estimator.h"
 #include "modules/audio_processing/aec3/subband_erle_estimator.h"
 #include "modules/audio_processing/logging/apm_data_dumper.h"
 
@@ -29,16 +32,17 @@
 class ErleEstimator {
  public:
   ErleEstimator(size_t startup_phase_length_blocks_,
-                float min_erle,
-                float max_erle_lf,
-                float max_erle_hf);
+                const EchoCanceller3Config& config);
   ~ErleEstimator();
 
   // Resets the fullband ERLE estimator and the subbands ERLE estimators.
   void Reset(bool delay_change);
 
   // Updates the ERLE estimates.
-  void Update(rtc::ArrayView<const float> reverb_render_spectrum,
+  void Update(const RenderBuffer& render_buffer,
+              const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+                  filter_frequency_response,
+              rtc::ArrayView<const float> reverb_render_spectrum,
               rtc::ArrayView<const float> capture_spectrum,
               rtc::ArrayView<const float> subtractor_spectrum,
               bool converged_filter,
@@ -46,11 +50,12 @@
 
   // Returns the most recent subband ERLE estimates.
   const std::array<float, kFftLengthBy2Plus1>& Erle() const {
-    return subband_erle_estimator_.Erle();
+    return use_signal_dependent_erle_ ? signal_dependent_erle_estimator_.Erle()
+                                      : subband_erle_estimator_.Erle();
   }
   // Returns the subband ERLE that are estimated during onsets. Used
   // for logging/testing.
-  const std::array<float, kFftLengthBy2Plus1>& ErleOnsets() const {
+  rtc::ArrayView<const float> ErleOnsets() const {
     return subband_erle_estimator_.ErleOnsets();
   }
 
@@ -71,8 +76,10 @@
 
  private:
   const size_t startup_phase_length_blocks__;
+  const bool use_signal_dependent_erle_;
   FullBandErleEstimator fullband_erle_estimator_;
   SubbandErleEstimator subband_erle_estimator_;
+  SignalDependentErleEstimator signal_dependent_erle_estimator_;
   size_t blocks_since_reset_ = 0;
 };
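A minimal construction sketch for the new config-driven interface; the value 2 is illustrative, and any erle.num_sections greater than one makes Erle() return the signal-dependent estimate:

    #include "api/audio/echo_canceller3_config.h"
    #include "modules/audio_processing/aec3/erle_estimator.h"

    void ConfigureSignalDependentErle() {
      webrtc::EchoCanceller3Config config;
      config.erle.num_sections = 2;  // > 1 selects the signal-dependent path.
      webrtc::ErleEstimator estimator(/*startup_phase_length_blocks_=*/0,
                                      config);
    }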
 
diff --git a/modules/audio_processing/aec3/erle_estimator_unittest.cc b/modules/audio_processing/aec3/erle_estimator_unittest.cc
index 2cb050a..59a7471 100644
--- a/modules/audio_processing/aec3/erle_estimator_unittest.cc
+++ b/modules/audio_processing/aec3/erle_estimator_unittest.cc
@@ -12,6 +12,9 @@
 
 #include "api/array_view.h"
 #include "modules/audio_processing/aec3/erle_estimator.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "modules/audio_processing/aec3/vector_buffer.h"
+#include "rtc_base/random.h"
 #include "test/gtest.h"
 
 namespace webrtc {
@@ -19,11 +22,9 @@
 namespace {
 
 constexpr int kLowFrequencyLimit = kFftLengthBy2 / 2;
-constexpr float kMaxErleLf = 8.f;
-constexpr float kMaxErleHf = 1.5f;
-constexpr float kMinErle = 1.0f;
 constexpr float kTrueErle = 10.f;
 constexpr float kTrueErleOnsets = 1.0f;
+constexpr float kEchoPathGain = 3.f;
 
 void VerifyErleBands(rtc::ArrayView<const float> erle,
                      float reference_lf,
@@ -44,80 +45,157 @@
   EXPECT_NEAR(reference_lf, erle_time_domain, 0.5);
 }
 
-void FormFarendFrame(std::array<float, kFftLengthBy2Plus1>* X2,
+void FormFarendTimeFrame(rtc::ArrayView<float> x) {
+  const std::array<float, kBlockSize> frame = {
+      7459.88, 17209.6, 17383,   20768.9, 16816.7, 18386.3, 4492.83, 9675.85,
+      6665.52, 14808.6, 9342.3,  7483.28, 19261.7, 4145.98, 1622.18, 13475.2,
+      7166.32, 6856.61, 21937,   7263.14, 9569.07, 14919,   8413.32, 7551.89,
+      7848.65, 6011.27, 13080.6, 15865.2, 12656,   17459.6, 4263.93, 4503.03,
+      9311.79, 21095.8, 12657.9, 13906.6, 19267.2, 11338.1, 16828.9, 11501.6,
+      11405,   15031.4, 14541.6, 19765.5, 18346.3, 19350.2, 3157.47, 18095.8,
+      1743.68, 21328.2, 19727.5, 7295.16, 10332.4, 11055.5, 20107.4, 14708.4,
+      12416.2, 16434,   2454.69, 9840.8,  6867.23, 1615.75, 6059.9,  8394.19};
+  RTC_DCHECK_GE(x.size(), frame.size());
+  std::copy(frame.begin(), frame.end(), x.begin());
+}
+
+void FormFarendFrame(const RenderBuffer& render_buffer,
+                     std::array<float, kFftLengthBy2Plus1>* X2,
                      std::array<float, kFftLengthBy2Plus1>* E2,
                      std::array<float, kFftLengthBy2Plus1>* Y2,
                      float erle) {
-  X2->fill(500 * 1000.f * 1000.f);
-  E2->fill(1000.f * 1000.f);
-  Y2->fill(erle * (*E2)[0]);
-}
+  const auto& spectrum_buffer = render_buffer.GetSpectrumBuffer();
+  const auto& X2_from_buffer = spectrum_buffer.buffer[spectrum_buffer.write];
+  std::copy(X2_from_buffer.begin(), X2_from_buffer.end(), X2->begin());
+  std::transform(X2->begin(), X2->end(), Y2->begin(),
+                 [](float a) { return a * kEchoPathGain * kEchoPathGain; });
+  std::transform(Y2->begin(), Y2->end(), E2->begin(),
+                 [erle](float a) { return a / erle; });
 
-void FormNearendFrame(std::array<float, kFftLengthBy2Plus1>* X2,
+}
+
+void FormNearendFrame(rtc::ArrayView<float> x,
+                      std::array<float, kFftLengthBy2Plus1>* X2,
                       std::array<float, kFftLengthBy2Plus1>* E2,
                       std::array<float, kFftLengthBy2Plus1>* Y2) {
+  x[0] = 0.f;
   X2->fill(0.f);
   Y2->fill(500.f * 1000.f * 1000.f);
   E2->fill((*Y2)[0]);
 }
 
+void GetFilterFreq(std::vector<std::array<float, kFftLengthBy2Plus1>>&
+                       filter_frequency_response,
+                   size_t delay_headroom_blocks) {
+  RTC_DCHECK_GE(filter_frequency_response.size(), delay_headroom_blocks);
+  for (auto& block_freq_resp : filter_frequency_response) {
+    block_freq_resp.fill(0.f);
+  }
+
+  for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+    filter_frequency_response[delay_headroom_blocks][k] = kEchoPathGain;
+  }
+}
+
 }  // namespace
 
 TEST(ErleEstimator, VerifyErleIncreaseAndHold) {
   std::array<float, kFftLengthBy2Plus1> X2;
   std::array<float, kFftLengthBy2Plus1> E2;
   std::array<float, kFftLengthBy2Plus1> Y2;
+  EchoCanceller3Config config;
+  std::vector<std::vector<float>> x(3, std::vector<float>(kBlockSize, 0.f));
+  std::vector<std::array<float, kFftLengthBy2Plus1>> filter_frequency_response(
+      config.filter.main.length_blocks);
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create2(config, 3));
 
-  ErleEstimator estimator(0, kMinErle, kMaxErleLf, kMaxErleHf);
+  GetFilterFreq(filter_frequency_response, config.delay.delay_headroom_blocks);
 
+  ErleEstimator estimator(0, config);
+
+  FormFarendTimeFrame(x[0]);
+  render_delay_buffer->Insert(x);
+  render_delay_buffer->PrepareCaptureProcessing();
   // Verifies that the ERLE estimate is properly increased to higher values.
-  FormFarendFrame(&X2, &E2, &Y2, kTrueErle);
-
+  FormFarendFrame(*render_delay_buffer->GetRenderBuffer(), &X2, &E2, &Y2,
+                  kTrueErle);
   for (size_t k = 0; k < 200; ++k) {
-    estimator.Update(X2, Y2, E2, true, true);
+    render_delay_buffer->Insert(x);
+    render_delay_buffer->PrepareCaptureProcessing();
+    estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+                     filter_frequency_response, X2, Y2, E2, true, true);
   }
   VerifyErle(estimator.Erle(), std::pow(2.f, estimator.FullbandErleLog2()),
-             kMaxErleLf, kMaxErleHf);
+             config.erle.max_l, config.erle.max_h);
 
-  FormNearendFrame(&X2, &E2, &Y2);
+  FormNearendFrame(x[0], &X2, &E2, &Y2);
   // Verifies that the ERLE is not immediately decreased during nearend
   // activity.
   for (size_t k = 0; k < 50; ++k) {
-    estimator.Update(X2, Y2, E2, true, true);
+    render_delay_buffer->Insert(x);
+    render_delay_buffer->PrepareCaptureProcessing();
+    estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+                     filter_frequency_response, X2, Y2, E2, true, true);
   }
   VerifyErle(estimator.Erle(), std::pow(2.f, estimator.FullbandErleLog2()),
-             kMaxErleLf, kMaxErleHf);
+             config.erle.max_l, config.erle.max_h);
 }
 
 TEST(ErleEstimator, VerifyErleTrackingOnOnsets) {
   std::array<float, kFftLengthBy2Plus1> X2;
   std::array<float, kFftLengthBy2Plus1> E2;
   std::array<float, kFftLengthBy2Plus1> Y2;
+  EchoCanceller3Config config;
+  std::vector<std::vector<float>> x(3, std::vector<float>(kBlockSize, 0.f));
+  std::vector<std::array<float, kFftLengthBy2Plus1>> filter_frequency_response(
+      config.filter.main.length_blocks);
 
-  ErleEstimator estimator(0, kMinErle, kMaxErleLf, kMaxErleHf);
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer(
+      RenderDelayBuffer::Create2(config, 3));
+
+  GetFilterFreq(filter_frequency_response, config.delay.delay_headroom_blocks);
+
+  ErleEstimator estimator(0, config);
+
+  FormFarendTimeFrame(x[0]);
+  render_delay_buffer->Insert(x);
+  render_delay_buffer->PrepareCaptureProcessing();
 
   for (size_t burst = 0; burst < 20; ++burst) {
-    FormFarendFrame(&X2, &E2, &Y2, kTrueErleOnsets);
+    FormFarendFrame(*render_delay_buffer->GetRenderBuffer(), &X2, &E2, &Y2,
+                    kTrueErleOnsets);
     for (size_t k = 0; k < 10; ++k) {
-      estimator.Update(X2, Y2, E2, true, true);
+      render_delay_buffer->Insert(x);
+      render_delay_buffer->PrepareCaptureProcessing();
+      estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+                       filter_frequency_response, X2, Y2, E2, true, true);
     }
-    FormFarendFrame(&X2, &E2, &Y2, kTrueErle);
+    FormFarendFrame(*render_delay_buffer->GetRenderBuffer(), &X2, &E2, &Y2,
+                    kTrueErle);
     for (size_t k = 0; k < 200; ++k) {
-      estimator.Update(X2, Y2, E2, true, true);
+      render_delay_buffer->Insert(x);
+      render_delay_buffer->PrepareCaptureProcessing();
+      estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+                       filter_frequency_response, X2, Y2, E2, true, true);
     }
-    FormNearendFrame(&X2, &E2, &Y2);
+    FormNearendFrame(x[0], &X2, &E2, &Y2);
     for (size_t k = 0; k < 300; ++k) {
-      estimator.Update(X2, Y2, E2, true, true);
+      render_delay_buffer->Insert(x);
+      render_delay_buffer->PrepareCaptureProcessing();
+      estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+                       filter_frequency_response, X2, Y2, E2, true, true);
     }
   }
-  VerifyErleBands(estimator.ErleOnsets(), kMinErle, kMinErle);
-  FormNearendFrame(&X2, &E2, &Y2);
+  VerifyErleBands(estimator.ErleOnsets(), config.erle.min, config.erle.min);
+  FormNearendFrame(x[0], &X2, &E2, &Y2);
   for (size_t k = 0; k < 1000; k++) {
-    estimator.Update(X2, Y2, E2, true, true);
+    estimator.Update(*render_delay_buffer->GetRenderBuffer(),
+                     filter_frequency_response, X2, Y2, E2, true, true);
   }
   // Verifies that during nearend activity, Erle converges to the onset Erle.
   VerifyErle(estimator.Erle(), std::pow(2.f, estimator.FullbandErleLog2()),
-             kMinErle, kMinErle);
+             config.erle.min, config.erle.min);
 }
 
 }  // namespace webrtc
diff --git a/modules/audio_processing/aec3/filter_analyzer.cc b/modules/audio_processing/aec3/filter_analyzer.cc
index 5b890d7..3e69be6 100644
--- a/modules/audio_processing/aec3/filter_analyzer.cc
+++ b/modules/audio_processing/aec3/filter_analyzer.cc
@@ -25,18 +25,22 @@
 namespace webrtc {
 namespace {
 
-size_t FindPeakIndex(rtc::ArrayView<const float> filter_time_domain) {
-  size_t peak_index = 0;
-  float max_h2 = filter_time_domain[0] * filter_time_domain[0];
-  for (size_t k = 1; k < filter_time_domain.size(); ++k) {
+size_t FindPeakIndex(rtc::ArrayView<const float> filter_time_domain,
+                     size_t peak_index_in,
+                     size_t start_sample,
+                     size_t end_sample) {
+  size_t peak_index_out = peak_index_in;
+  float max_h2 =
+      filter_time_domain[peak_index_out] * filter_time_domain[peak_index_out];
+  for (size_t k = start_sample; k <= end_sample; ++k) {
     float tmp = filter_time_domain[k] * filter_time_domain[k];
     if (tmp > max_h2) {
-      peak_index = k;
+      peak_index_out = k;
       max_h2 = tmp;
     }
   }
 
-  return peak_index;
+  return peak_index_out;
 }
 
 bool EnableFilterPreprocessing() {
@@ -44,6 +48,11 @@
       "WebRTC-Aec3FilterAnalyzerPreprocessorKillSwitch");
 }
 
+bool EnableIncrementalAnalysis() {
+  return !field_trial::IsEnabled(
+      "WebRTC-Aec3FilterAnalyzerIncrementalAnalysisKillSwitch");
+}
+
 }  // namespace
 
 int FilterAnalyzer::instance_count_ = 0;
@@ -54,46 +63,37 @@
       use_preprocessed_filter_(EnableFilterPreprocessing()),
       bounded_erl_(config.ep_strength.bounded_erl),
       default_gain_(config.ep_strength.lf),
-      active_render_threshold_(config.render_levels.active_render_limit *
-                               config.render_levels.active_render_limit *
-                               kFftLengthBy2),
+      use_incremental_analysis_(EnableIncrementalAnalysis()),
       h_highpass_(GetTimeDomainLength(config.filter.main.length_blocks), 0.f),
-      filter_length_blocks_(config.filter.main_initial.length_blocks) {
+      filter_length_blocks_(config.filter.main_initial.length_blocks),
+      consistent_filter_detector_(config) {
   Reset();
 }
 
-void FilterAnalyzer::PreProcessFilter(
-    rtc::ArrayView<const float> filter_time_domain) {
-  RTC_DCHECK_GE(h_highpass_.capacity(), filter_time_domain.size());
-  h_highpass_.resize(filter_time_domain.size());
-  // Minimum phase high-pass filter with cutoff frequency at about 600 Hz.
-  constexpr std::array<float, 3> h = {{0.7929742f, -0.36072128f, -0.47047766f}};
-
-  std::fill(h_highpass_.begin(), h_highpass_.end(), 0.f);
-  for (size_t k = h.size() - 1; k < filter_time_domain.size(); ++k) {
-    for (size_t j = 0; j < h.size(); ++j) {
-      h_highpass_[k] += filter_time_domain[k - j] * h[j];
-    }
-  }
-}
-
 FilterAnalyzer::~FilterAnalyzer() = default;
 
 void FilterAnalyzer::Reset() {
   delay_blocks_ = 0;
-  consistent_estimate_ = false;
   blocks_since_reset_ = 0;
-  consistent_estimate_ = false;
-  consistent_estimate_counter_ = 0;
-  consistent_delay_reference_ = -10;
   gain_ = default_gain_;
+  peak_index_ = 0;
+  ResetRegion();
+  consistent_filter_detector_.Reset();
 }
 
-void FilterAnalyzer::Update(
+void FilterAnalyzer::Update(rtc::ArrayView<const float> filter_time_domain,
+                            const RenderBuffer& render_buffer) {
+  SetRegionToAnalyze(filter_time_domain);
+  AnalyzeRegion(filter_time_domain, render_buffer);
+}
+
+void FilterAnalyzer::AnalyzeRegion(
     rtc::ArrayView<const float> filter_time_domain,
-    const std::vector<std::array<float, kFftLengthBy2Plus1>>&
-        filter_freq_response,
     const RenderBuffer& render_buffer) {
+  RTC_DCHECK_LT(region_.start_sample_, filter_time_domain.size());
+  RTC_DCHECK_LT(peak_index_, filter_time_domain.size());
+  RTC_DCHECK_LT(region_.end_sample_, filter_time_domain.size());
+
   // Preprocess the filter to avoid issues with low-frequency components in the
   // filter.
   PreProcessFilter(filter_time_domain);
@@ -103,51 +103,15 @@
       use_preprocessed_filter_ ? h_highpass_ : filter_time_domain;
   RTC_DCHECK_EQ(filter_to_analyze.size(), filter_time_domain.size());
 
-  size_t peak_index = FindPeakIndex(filter_to_analyze);
-  delay_blocks_ = peak_index >> kBlockSizeLog2;
-  UpdateFilterGain(filter_to_analyze, peak_index);
-
-  float filter_floor = 0;
-  float filter_secondary_peak = 0;
-  size_t limit1 = peak_index < 64 ? 0 : peak_index - 64;
-  size_t limit2 =
-      peak_index > filter_to_analyze.size() - 129 ? 0 : peak_index + 128;
-
-  for (size_t k = 0; k < limit1; ++k) {
-    float abs_h = fabsf(filter_to_analyze[k]);
-    filter_floor += abs_h;
-    filter_secondary_peak = std::max(filter_secondary_peak, abs_h);
-  }
-  for (size_t k = limit2; k < filter_to_analyze.size(); ++k) {
-    float abs_h = fabsf(filter_to_analyze[k]);
-    filter_floor += abs_h;
-    filter_secondary_peak = std::max(filter_secondary_peak, abs_h);
-  }
-
-  filter_floor /= (limit1 + filter_to_analyze.size() - limit2);
-
-  float abs_peak = fabsf(filter_to_analyze[peak_index]);
-  bool significant_peak_index =
-      abs_peak > 10.f * filter_floor && abs_peak > 2.f * filter_secondary_peak;
-
-  if (consistent_delay_reference_ != delay_blocks_ || !significant_peak_index) {
-    consistent_estimate_counter_ = 0;
-    consistent_delay_reference_ = delay_blocks_;
-  } else {
-    const auto& x = render_buffer.Block(-delay_blocks_)[0];
-    const float x_energy =
-        std::inner_product(x.begin(), x.end(), x.begin(), 0.f);
-    const bool active_render_block = x_energy > active_render_threshold_;
-
-    if (active_render_block) {
-      ++consistent_estimate_counter_;
-    }
-  }
-
-  consistent_estimate_ =
-      consistent_estimate_counter_ > 1.5f * kNumBlocksPerSecond;
-
+  peak_index_ = FindPeakIndex(filter_to_analyze, peak_index_,
+                              region_.start_sample_, region_.end_sample_);
+  delay_blocks_ = peak_index_ >> kBlockSizeLog2;
+  UpdateFilterGain(filter_to_analyze, peak_index_);
   filter_length_blocks_ = filter_time_domain.size() * (1.f / kBlockSize);
+
+  consistent_estimate_ = consistent_filter_detector_.Detect(
+      filter_to_analyze, region_, render_buffer.Block(-delay_blocks_)[0],
+      peak_index_, delay_blocks_);
 }
 
 void FilterAnalyzer::UpdateFilterGain(
@@ -169,4 +133,114 @@
   }
 }
 
+void FilterAnalyzer::PreProcessFilter(
+    rtc::ArrayView<const float> filter_time_domain) {
+  RTC_DCHECK_GE(h_highpass_.capacity(), filter_time_domain.size());
+  h_highpass_.resize(filter_time_domain.size());
+  // Minimum phase high-pass filter with cutoff frequency at about 600 Hz.
+  constexpr std::array<float, 3> h = {{0.7929742f, -0.36072128f, -0.47047766f}};
+
+  std::fill(h_highpass_.begin() + region_.start_sample_,
+            h_highpass_.begin() + region_.end_sample_ + 1, 0.f);
+  for (size_t k = std::max(h.size() - 1, region_.start_sample_);
+       k <= region_.end_sample_; ++k) {
+    for (size_t j = 0; j < h.size(); ++j) {
+      h_highpass_[k] += filter_time_domain[k - j] * h[j];
+    }
+  }
+}
+
+void FilterAnalyzer::ResetRegion() {
+  region_.start_sample_ = 0;
+  region_.end_sample_ = 0;
+}
+
+void FilterAnalyzer::SetRegionToAnalyze(
+    rtc::ArrayView<const float> filter_time_domain) {
+  constexpr size_t kNumberBlocksToUpdate = 1;
+  auto& r = region_;
+  if (use_incremental_analysis_) {
+    r.start_sample_ =
+        r.end_sample_ == filter_time_domain.size() - 1 ? 0 : r.end_sample_ + 1;
+    r.end_sample_ =
+        std::min(r.start_sample_ + kNumberBlocksToUpdate * kBlockSize - 1,
+                 filter_time_domain.size() - 1);
+
+  } else {
+    r.start_sample_ = 0;
+    r.end_sample_ = filter_time_domain.size() - 1;
+  }
+}
+
+FilterAnalyzer::ConsistentFilterDetector::ConsistentFilterDetector(
+    const EchoCanceller3Config& config)
+    : active_render_threshold_(config.render_levels.active_render_limit *
+                               config.render_levels.active_render_limit *
+                               kFftLengthBy2) {}
+
+void FilterAnalyzer::ConsistentFilterDetector::Reset() {
+  significant_peak_ = false;
+  filter_floor_accum_ = 0.f;
+  filter_secondary_peak_ = 0.f;
+  filter_floor_low_limit_ = 0;
+  filter_floor_high_limit_ = 0;
+  consistent_estimate_counter_ = 0;
+  consistent_delay_reference_ = -10;
+}
+
+bool FilterAnalyzer::ConsistentFilterDetector::Detect(
+    rtc::ArrayView<const float> filter_to_analyze,
+    const FilterRegion& region,
+    rtc::ArrayView<const float> x_block,
+    size_t peak_index,
+    int delay_blocks) {
+  if (region.start_sample_ == 0) {
+    filter_floor_accum_ = 0.f;
+    filter_secondary_peak_ = 0.f;
+    filter_floor_low_limit_ = peak_index < 64 ? 0 : peak_index - 64;
+    filter_floor_high_limit_ =
+        peak_index > filter_to_analyze.size() - 129 ? 0 : peak_index + 128;
+  }
+
+  for (size_t k = region.start_sample_;
+       k < std::min(region.end_sample_ + 1, filter_floor_low_limit_); ++k) {
+    float abs_h = fabsf(filter_to_analyze[k]);
+    filter_floor_accum_ += abs_h;
+    filter_secondary_peak_ = std::max(filter_secondary_peak_, abs_h);
+  }
+
+  for (size_t k = std::max(filter_floor_high_limit_, region.start_sample_);
+       k <= region.end_sample_; ++k) {
+    float abs_h = fabsf(filter_to_analyze[k]);
+    filter_floor_accum_ += abs_h;
+    filter_secondary_peak_ = std::max(filter_secondary_peak_, abs_h);
+  }
+
+  if (region.end_sample_ == filter_to_analyze.size() - 1) {
+    float filter_floor = filter_floor_accum_ /
+                         (filter_floor_low_limit_ + filter_to_analyze.size() -
+                          filter_floor_high_limit_);
+
+    float abs_peak = fabsf(filter_to_analyze[peak_index]);
+    significant_peak_ = abs_peak > 10.f * filter_floor &&
+                        abs_peak > 2.f * filter_secondary_peak_;
+  }
+
+  if (significant_peak_) {
+    const float x_energy = std::inner_product(x_block.begin(), x_block.end(),
+                                              x_block.begin(), 0.f);
+    const bool active_render_block = x_energy > active_render_threshold_;
+
+    if (consistent_delay_reference_ == delay_blocks) {
+      if (active_render_block) {
+        ++consistent_estimate_counter_;
+      }
+    } else {
+      consistent_estimate_counter_ = 0;
+      consistent_delay_reference_ = delay_blocks;
+    }
+  }
+  return consistent_estimate_counter_ > 1.5f * kNumBlocksPerSecond;
+}
+
 }  // namespace webrtc
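The rewritten analyzer above spreads the impulse-response scan over time: each Update() call only looks at a single kBlockSize-sized region and carries the peak index across calls. A standalone sketch of that region-advance and running-peak idea (not the WebRTC code; kBlockSize = 64 is assumed and all names are illustrative):

#include <algorithm>
#include <cstddef>
#include <vector>

namespace sketch {
constexpr size_t kBlockSize = 64;  // Assumed AEC3 block size in samples.

struct Region {
  size_t start = 0;
  size_t end = 0;
};

// Advances the analysis window by one block, wrapping at the end of the
// filter, mirroring SetRegionToAnalyze() above.
Region NextRegion(const Region& r, size_t filter_size) {
  Region next;
  next.start = (r.end == filter_size - 1) ? 0 : r.end + 1;
  next.end = std::min(next.start + kBlockSize - 1, filter_size - 1);
  return next;
}

// Updates a running peak index using only the samples inside the region,
// mirroring the new FindPeakIndex() signature.
size_t UpdatePeak(const std::vector<float>& h, size_t peak, const Region& r) {
  float max_h2 = h[peak] * h[peak];
  for (size_t k = r.start; k <= r.end; ++k) {
    const float h2 = h[k] * h[k];
    if (h2 > max_h2) {
      max_h2 = h2;
      peak = k;
    }
  }
  return peak;
}
}  // namespace sketch

Similarly, the ConsistentFilterDetector boils down to counting consecutive active-render blocks whose delay stays on the same reference while the peak is significant; roughly 1.5 s of such blocks flags the filter as consistent. A per-call sketch under the assumption that kNumBlocksPerSecond is 250:

struct ConsistencyCounterSketch {
  int reference_delay = -10;
  int counter = 0;

  // Returns true once enough consecutive consistent, active-render blocks
  // with a significant filter peak have been observed.
  bool Update(bool significant_peak, bool active_render, int delay_blocks) {
    if (significant_peak) {
      if (reference_delay == delay_blocks) {
        if (active_render) {
          ++counter;
        }
      } else {
        counter = 0;
        reference_delay = delay_blocks;
      }
    }
    return counter > 1.5f * 250;  // Assumed kNumBlocksPerSecond = 250.
  }
};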
diff --git a/modules/audio_processing/aec3/filter_analyzer.h b/modules/audio_processing/aec3/filter_analyzer.h
index 99a0e25..e0fd069 100644
--- a/modules/audio_processing/aec3/filter_analyzer.h
+++ b/modules/audio_processing/aec3/filter_analyzer.h
@@ -37,8 +37,6 @@
 
   // Updates the estimates with new input data.
   void Update(rtc::ArrayView<const float> filter_time_domain,
-              const std::vector<std::array<float, kFftLengthBy2Plus1>>&
-                  filter_freq_response,
               const RenderBuffer& render_buffer);
 
   // Returns the delay of the filter in terms of blocks.
@@ -58,24 +56,61 @@
   rtc::ArrayView<const float> GetAdjustedFilter() const { return h_highpass_; }
 
  private:
+  void AnalyzeRegion(rtc::ArrayView<const float> filter_time_domain,
+                     const RenderBuffer& render_buffer);
+
   void UpdateFilterGain(rtc::ArrayView<const float> filter_time_domain,
                         size_t max_index);
   void PreProcessFilter(rtc::ArrayView<const float> filter_time_domain);
 
+  void ResetRegion();
+
+  void SetRegionToAnalyze(rtc::ArrayView<const float> filter_time_domain);
+
+  struct FilterRegion {
+    size_t start_sample_;
+    size_t end_sample_;
+  };
+
+  // This class checks whether the shape of the impulse response has been
+  // consistent over time.
+  class ConsistentFilterDetector {
+   public:
+    explicit ConsistentFilterDetector(const EchoCanceller3Config& config);
+    void Reset();
+    bool Detect(rtc::ArrayView<const float> filter_to_analyze,
+                const FilterRegion& region,
+                rtc::ArrayView<const float> x_block,
+                size_t peak_index,
+                int delay_blocks);
+
+   private:
+    bool significant_peak_;
+    float filter_floor_accum_;
+    float filter_secondary_peak_;
+    size_t filter_floor_low_limit_;
+    size_t filter_floor_high_limit_;
+    const float active_render_threshold_;
+    size_t consistent_estimate_counter_ = 0;
+    int consistent_delay_reference_ = -10;
+  };
+
   static int instance_count_;
   std::unique_ptr<ApmDataDumper> data_dumper_;
   const bool use_preprocessed_filter_;
   const bool bounded_erl_;
   const float default_gain_;
-  const float active_render_threshold_;
+  const bool use_incremental_analysis_;
   std::vector<float> h_highpass_;
   int delay_blocks_ = 0;
   size_t blocks_since_reset_ = 0;
   bool consistent_estimate_ = false;
-  size_t consistent_estimate_counter_ = 0;
-  int consistent_delay_reference_ = -10;
   float gain_;
+  size_t peak_index_;
   int filter_length_blocks_;
+  FilterRegion region_;
+  ConsistentFilterDetector consistent_filter_detector_;
+
   RTC_DISALLOW_COPY_AND_ASSIGN(FilterAnalyzer);
 };
 
diff --git a/modules/audio_processing/aec3/fullband_erle_estimator.cc b/modules/audio_processing/aec3/fullband_erle_estimator.cc
index dc74509..7893b97 100644
--- a/modules/audio_processing/aec3/fullband_erle_estimator.cc
+++ b/modules/audio_processing/aec3/fullband_erle_estimator.cc
@@ -26,7 +26,7 @@
 namespace {
 constexpr float kEpsilon = 1e-3f;
 constexpr float kX2BandEnergyThreshold = 44015068.0f;
-constexpr int kErleHold = 100;
+constexpr int kBlocksToHoldErle = 100;
 constexpr int kPointsToAccumulate = 6;
 }  // namespace
 
@@ -55,7 +55,7 @@
       const float Y2_sum = std::accumulate(Y2.begin(), Y2.end(), 0.0f);
       const float E2_sum = std::accumulate(E2.begin(), E2.end(), 0.0f);
       if (instantaneous_erle_.Update(Y2_sum, E2_sum)) {
-        hold_counter_time_domain_ = kErleHold;
+        hold_counter_time_domain_ = kBlocksToHoldErle;
         erle_time_domain_log2_ +=
             0.1f * ((instantaneous_erle_.GetInstErleLog2().value()) -
                     erle_time_domain_log2_);
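A minimal sketch of the time-domain tracking this hunk touches (illustrative only, not the WebRTC class): every accepted instantaneous ERLE pulls the log2-domain estimate 10% of the way toward it and re-arms the kBlocksToHoldErle hold counter.

#include <cmath>

struct FullbandErleSketch {
  float erle_log2 = 0.f;
  int hold_counter = 0;

  // Called when an instantaneous ERLE estimate has been accepted.
  void Accept(float instantaneous_erle_log2) {
    hold_counter = 100;  // kBlocksToHoldErle, as in the hunk above.
    erle_log2 += 0.1f * (instantaneous_erle_log2 - erle_log2);
  }

  float ErleLinear() const { return std::exp2(erle_log2); }
};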
diff --git a/modules/audio_processing/aec3/mock/mock_render_delay_controller.h b/modules/audio_processing/aec3/mock/mock_render_delay_controller.h
index 5520f76..5f652e1 100644
--- a/modules/audio_processing/aec3/mock/mock_render_delay_controller.h
+++ b/modules/audio_processing/aec3/mock/mock_render_delay_controller.h
@@ -33,6 +33,7 @@
                    size_t render_delay_buffer_delay,
                    const absl::optional<int>& echo_remover_delay,
                    rtc::ArrayView<const float> capture));
+  MOCK_CONST_METHOD0(HasClockdrift, bool());
 };
 
 }  // namespace test
diff --git a/modules/audio_processing/aec3/render_delay_controller.cc b/modules/audio_processing/aec3/render_delay_controller.cc
index 36e75d9..c4665ea 100644
--- a/modules/audio_processing/aec3/render_delay_controller.cc
+++ b/modules/audio_processing/aec3/render_delay_controller.cc
@@ -64,6 +64,7 @@
       size_t render_delay_buffer_delay,
       const absl::optional<int>& echo_remover_delay,
       rtc::ArrayView<const float> capture) override;
+  bool HasClockdrift() const override;
 
  private:
   static int instance_count_;
@@ -285,7 +286,8 @@
 
   metrics_.Update(delay_samples_ ? absl::optional<size_t>(delay_samples_->delay)
                                  : absl::nullopt,
-                  delay_ ? delay_->delay : 0, skew_shift);
+                  delay_ ? delay_->delay : 0, skew_shift,
+                  delay_estimator_.Clockdrift());
 
   data_dumper_->DumpRaw("aec3_render_delay_controller_delay",
                         delay_samples ? delay_samples->delay : 0);
@@ -301,6 +303,10 @@
   return delay_;
 }
 
+bool RenderDelayControllerImpl::HasClockdrift() const {
+  return delay_estimator_.Clockdrift() != ClockdriftDetector::Level::kNone;
+}
+
 }  // namespace
 
 RenderDelayController* RenderDelayController::Create(
diff --git a/modules/audio_processing/aec3/render_delay_controller.h b/modules/audio_processing/aec3/render_delay_controller.h
index 41ba422..b46ed89 100644
--- a/modules/audio_processing/aec3/render_delay_controller.h
+++ b/modules/audio_processing/aec3/render_delay_controller.h
@@ -44,6 +44,9 @@
       size_t render_delay_buffer_delay,
       const absl::optional<int>& echo_remover_delay,
       rtc::ArrayView<const float> capture) = 0;
+
+  // Returns true if clockdrift has been detected.
+  virtual bool HasClockdrift() const = 0;
 };
 }  // namespace webrtc
 
diff --git a/modules/audio_processing/aec3/render_delay_controller2.cc b/modules/audio_processing/aec3/render_delay_controller2.cc
index 1b7c18d..00daf8f 100644
--- a/modules/audio_processing/aec3/render_delay_controller2.cc
+++ b/modules/audio_processing/aec3/render_delay_controller2.cc
@@ -46,6 +46,7 @@
       size_t render_delay_buffer_delay,
       const absl::optional<int>& echo_remover_delay,
       rtc::ArrayView<const float> capture) override;
+  bool HasClockdrift() const override;
 
  private:
   static int instance_count_;
@@ -127,7 +128,7 @@
   delay_samples_ = absl::nullopt;
   delay_estimator_.Reset(reset_delay_confidence);
   delay_change_counter_ = 0;
-  if (reset_delay_confidence || true) {
+  if (reset_delay_confidence) {
     last_delay_estimate_quality_ = DelayEstimate::Quality::kCoarse;
   }
 }
@@ -192,7 +193,7 @@
 
   metrics_.Update(delay_samples_ ? absl::optional<size_t>(delay_samples_->delay)
                                  : absl::nullopt,
-                  delay_ ? delay_->delay : 0, 0);
+                  delay_ ? delay_->delay : 0, 0, delay_estimator_.Clockdrift());
 
   data_dumper_->DumpRaw("aec3_render_delay_controller_delay",
                         delay_samples ? delay_samples->delay : 0);
@@ -202,6 +203,10 @@
   return delay_;
 }
 
+bool RenderDelayControllerImpl2::HasClockdrift() const {
+  return delay_estimator_.Clockdrift() != ClockdriftDetector::Level::kNone;
+}
+
 }  // namespace
 
 RenderDelayController* RenderDelayController::Create2(
diff --git a/modules/audio_processing/aec3/render_delay_controller_metrics.cc b/modules/audio_processing/aec3/render_delay_controller_metrics.cc
index c51d468..582e033 100644
--- a/modules/audio_processing/aec3/render_delay_controller_metrics.cc
+++ b/modules/audio_processing/aec3/render_delay_controller_metrics.cc
@@ -46,7 +46,8 @@
 void RenderDelayControllerMetrics::Update(
     absl::optional<size_t> delay_samples,
     size_t buffer_delay_blocks,
-    absl::optional<int> skew_shift_blocks) {
+    absl::optional<int> skew_shift_blocks,
+    ClockdriftDetector::Level clockdrift) {
   ++call_counter_;
 
   if (!initial_update) {
@@ -115,6 +116,10 @@
         static_cast<int>(delay_changes),
         static_cast<int>(DelayChangesCategory::kNumCategories));
 
+    RTC_HISTOGRAM_ENUMERATION(
+        "WebRTC.Audio.EchoCanceller.Clockdrift", static_cast<int>(clockdrift),
+        static_cast<int>(ClockdriftDetector::Level::kNumCategories));
+
     metrics_reported_ = true;
     call_counter_ = 0;
     ResetMetrics();
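The clockdrift plumbing added in the controllers and here in the metrics reduces to the sketch below (standalone, not the WebRTC code; the kProbable/kVerified names are assumed to follow the ClockdriftDetector convention, since only kNone and kNumCategories appear in this diff):

#include <cstdio>

namespace sketch {
// Assumed to mirror ClockdriftDetector::Level.
enum class Level { kNone, kProbable, kVerified, kNumCategories };

// What RenderDelayControllerImpl::HasClockdrift() reports.
bool HasClockdrift(Level detected) {
  return detected != Level::kNone;
}

// What the metrics path does once per reporting interval: the detected level
// is logged as one sample of an enumerated histogram.
void ReportClockdrift(Level detected) {
  std::printf("WebRTC.Audio.EchoCanceller.Clockdrift sample=%d boundary=%d\n",
              static_cast<int>(detected),
              static_cast<int>(Level::kNumCategories));
}
}  // namespace sketch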
diff --git a/modules/audio_processing/aec3/render_delay_controller_metrics.h b/modules/audio_processing/aec3/render_delay_controller_metrics.h
index 50e60bb..22cc202 100644
--- a/modules/audio_processing/aec3/render_delay_controller_metrics.h
+++ b/modules/audio_processing/aec3/render_delay_controller_metrics.h
@@ -14,6 +14,7 @@
 #include <stddef.h>
 
 #include "absl/types/optional.h"
+#include "modules/audio_processing/aec3/clockdrift_detector.h"
 #include "rtc_base/constructormagic.h"
 
 namespace webrtc {
@@ -26,7 +27,8 @@
   // Updates the metric with new data.
   void Update(absl::optional<size_t> delay_samples,
               size_t buffer_delay_blocks,
-              absl::optional<int> skew_shift_blocks);
+              absl::optional<int> skew_shift_blocks,
+              ClockdriftDetector::Level clockdrift);
 
   // Returns true if the metrics have just been reported, otherwise false.
   bool MetricsReported() { return metrics_reported_; }
diff --git a/modules/audio_processing/aec3/render_delay_controller_metrics_unittest.cc b/modules/audio_processing/aec3/render_delay_controller_metrics_unittest.cc
index e867de4..216b0e2 100644
--- a/modules/audio_processing/aec3/render_delay_controller_metrics_unittest.cc
+++ b/modules/audio_processing/aec3/render_delay_controller_metrics_unittest.cc
@@ -22,10 +22,12 @@
 
   for (int j = 0; j < 3; ++j) {
     for (int k = 0; k < kMetricsReportingIntervalBlocks - 1; ++k) {
-      metrics.Update(absl::nullopt, 0, absl::nullopt);
+      metrics.Update(absl::nullopt, 0, absl::nullopt,
+                     ClockdriftDetector::Level::kNone);
       EXPECT_FALSE(metrics.MetricsReported());
     }
-    metrics.Update(absl::nullopt, 0, absl::nullopt);
+    metrics.Update(absl::nullopt, 0, absl::nullopt,
+                   ClockdriftDetector::Level::kNone);
     EXPECT_TRUE(metrics.MetricsReported());
   }
 }
diff --git a/modules/audio_processing/aec3/signal_dependent_erle_estimator.cc b/modules/audio_processing/aec3/signal_dependent_erle_estimator.cc
new file mode 100644
index 0000000..32b36ab
--- /dev/null
+++ b/modules/audio_processing/aec3/signal_dependent_erle_estimator.cc
@@ -0,0 +1,368 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/signal_dependent_erle_estimator.h"
+
+#include <algorithm>
+#include <functional>
+#include <numeric>
+
+#include "modules/audio_processing/aec3/vector_buffer.h"
+#include "rtc_base/numerics/safe_minmax.h"
+
+namespace webrtc {
+
+namespace {
+
+constexpr std::array<size_t, SignalDependentErleEstimator::kSubbands + 1>
+    kBandBoundaries = {1, 8, 16, 24, 32, 48, kFftLengthBy2Plus1};
+
+std::array<size_t, kFftLengthBy2Plus1> FormSubbandMap() {
+  std::array<size_t, kFftLengthBy2Plus1> map_band_to_subband;
+  size_t subband = 1;
+  for (size_t k = 0; k < map_band_to_subband.size(); ++k) {
+    RTC_DCHECK_LT(subband, kBandBoundaries.size());
+    if (k >= kBandBoundaries[subband]) {
+      subband++;
+      RTC_DCHECK_LT(k, kBandBoundaries[subband]);
+    }
+    map_band_to_subband[k] = subband - 1;
+  }
+  return map_band_to_subband;
+}
+
+// Defines the size in blocks of the sections that are used for dividing the
+// linear filter. The sections are split in a non-linear manner so that the
+// lower sections, which typically represent the direct path, have a finer
+// resolution than the higher sections, which typically represent more
+// reverberant acoustic paths.
+std::vector<size_t> DefineFilterSectionSizes(size_t delay_headroom_blocks,
+                                             size_t num_blocks,
+                                             size_t num_sections) {
+  size_t filter_length_blocks = num_blocks - delay_headroom_blocks;
+  std::vector<size_t> section_sizes(num_sections);
+  size_t remaining_blocks = filter_length_blocks;
+  size_t remaining_sections = num_sections;
+  size_t estimator_size = 2;
+  size_t idx = 0;
+  while (remaining_sections > 1 &&
+         remaining_blocks > estimator_size * remaining_sections) {
+    RTC_DCHECK_LT(idx, section_sizes.size());
+    section_sizes[idx] = estimator_size;
+    remaining_blocks -= estimator_size;
+    remaining_sections--;
+    estimator_size *= 2;
+    idx++;
+  }
+
+  size_t last_groups_size = remaining_blocks / remaining_sections;
+  for (; idx < num_sections; idx++) {
+    section_sizes[idx] = last_groups_size;
+  }
+  section_sizes[num_sections - 1] +=
+      remaining_blocks - last_groups_size * remaining_sections;
+  return section_sizes;
+}
+
+// Forms the limits in blocks for each filter section. Those sections
+// are used for analyzing the echo estimates and investigating which
+// linear filter sections contribute most to the echo estimate energy.
+std::vector<size_t> SetSectionsBoundaries(size_t delay_headroom_blocks,
+                                          size_t num_blocks,
+                                          size_t num_sections) {
+  std::vector<size_t> estimator_boundaries_blocks(num_sections + 1);
+  if (estimator_boundaries_blocks.size() == 2) {
+    estimator_boundaries_blocks[0] = 0;
+    estimator_boundaries_blocks[1] = num_blocks;
+    return estimator_boundaries_blocks;
+  }
+  RTC_DCHECK_GT(estimator_boundaries_blocks.size(), 2);
+  const std::vector<size_t> section_sizes =
+      DefineFilterSectionSizes(delay_headroom_blocks, num_blocks,
+                               estimator_boundaries_blocks.size() - 1);
+
+  size_t idx = 0;
+  size_t current_size_block = 0;
+  RTC_DCHECK_EQ(section_sizes.size() + 1, estimator_boundaries_blocks.size());
+  estimator_boundaries_blocks[0] = delay_headroom_blocks;
+  for (size_t k = delay_headroom_blocks; k < num_blocks; ++k) {
+    current_size_block++;
+    if (current_size_block >= section_sizes[idx]) {
+      idx = idx + 1;
+      if (idx == section_sizes.size()) {
+        break;
+      }
+      estimator_boundaries_blocks[idx] = k + 1;
+      current_size_block = 0;
+    }
+  }
+  estimator_boundaries_blocks[section_sizes.size()] = num_blocks;
+  return estimator_boundaries_blocks;
+}
+
+std::array<float, SignalDependentErleEstimator::kSubbands>
+SetMaxErleSubbands(float max_erle_l, float max_erle_h, size_t limit_subband_l) {
+  std::array<float, SignalDependentErleEstimator::kSubbands> max_erle;
+  std::fill(max_erle.begin(), max_erle.begin() + limit_subband_l, max_erle_l);
+  std::fill(max_erle.begin() + limit_subband_l, max_erle.end(), max_erle_h);
+  return max_erle;
+}
+
+}  // namespace
+
+SignalDependentErleEstimator::SignalDependentErleEstimator(
+    const EchoCanceller3Config& config)
+    : min_erle_(config.erle.min),
+      num_sections_(config.erle.num_sections),
+      num_blocks_(config.filter.main.length_blocks),
+      delay_headroom_blocks_(config.delay.delay_headroom_blocks),
+      band_to_subband_(FormSubbandMap()),
+      max_erle_(SetMaxErleSubbands(config.erle.max_l,
+                                   config.erle.max_h,
+                                   band_to_subband_[kFftLengthBy2 / 2])),
+      section_boundaries_blocks_(SetSectionsBoundaries(delay_headroom_blocks_,
+                                                       num_blocks_,
+                                                       num_sections_)),
+      S2_section_accum_(num_sections_),
+      erle_estimators_(num_sections_),
+      correction_factors_(num_sections_) {
+  RTC_DCHECK_LE(num_sections_, num_blocks_);
+  RTC_DCHECK_GE(num_sections_, 1);
+
+  Reset();
+}
+
+SignalDependentErleEstimator::~SignalDependentErleEstimator() = default;
+
+void SignalDependentErleEstimator::Reset() {
+  erle_.fill(min_erle_);
+  for (auto& erle : erle_estimators_) {
+    erle.fill(min_erle_);
+  }
+  erle_ref_.fill(min_erle_);
+  for (auto& factor : correction_factors_) {
+    factor.fill(1.0f);
+  }
+  num_updates_.fill(0);
+}
+
+// Updates the Erle estimate by analyzing the current input signals. It uses
+// the render buffer and the filter frequency response to estimate how many
+// sections of the linear filter are needed to capture the majority of the
+// energy in the echo estimate. Based on that number of sections, the erle
+// estimate is refined by applying a correction factor to the erle that is
+// given as an input to this method.
+void SignalDependentErleEstimator::Update(
+    const RenderBuffer& render_buffer,
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+        filter_frequency_response,
+    rtc::ArrayView<const float> X2,
+    rtc::ArrayView<const float> Y2,
+    rtc::ArrayView<const float> E2,
+    rtc::ArrayView<const float> average_erle,
+    bool converged_filter) {
+  RTC_DCHECK_GT(num_sections_, 1);
+
+  // Gets the number of filter sections that are needed for capturing 90%
+  // of the power spectrum energy of the echo estimate.
+  std::array<size_t, kFftLengthBy2Plus1> n_active_sections;
+  ComputeNumberOfActiveFilterSections(render_buffer, filter_frequency_response,
+                                      n_active_sections);
+
+  if (converged_filter) {
+    // Updates the correction factor that is used for refining the erle and
+    // adapts it to the particular characteristics of the input signal.
+    UpdateCorrectionFactors(X2, Y2, E2, n_active_sections);
+  }
+
+  // Applies the correction factor to the input erle to obtain a more refined
+  // erle estimate for the current input signal.
+  for (size_t k = 0; k < kFftLengthBy2; ++k) {
+    float correction_factor =
+        correction_factors_[n_active_sections[k]][band_to_subband_[k]];
+    erle_[k] = rtc::SafeClamp(average_erle[k] * correction_factor, min_erle_,
+                              max_erle_[band_to_subband_[k]]);
+  }
+}
+
+void SignalDependentErleEstimator::Dump(
+    const std::unique_ptr<ApmDataDumper>& data_dumper) const {
+  for (auto& erle : erle_estimators_) {
+    data_dumper->DumpRaw("aec3_all_erle", erle);
+  }
+  data_dumper->DumpRaw("aec3_ref_erle", erle_ref_);
+  for (auto& factor : correction_factors_) {
+    data_dumper->DumpRaw("aec3_erle_correction_factor", factor);
+  }
+  data_dumper->DumpRaw("aec3_erle", erle_);
+}
+
+// Estimates for each band the smallest number of sections in the filter that
+// together constitute 90% of the estimated echo energy.
+void SignalDependentErleEstimator::ComputeNumberOfActiveFilterSections(
+    const RenderBuffer& render_buffer,
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+        filter_frequency_response,
+    rtc::ArrayView<size_t> n_active_filter_sections) {
+  RTC_DCHECK_GT(num_sections_, 1);
+  // Computes an approximation of the power spectrum as if the filter had
+  // been limited to a certain number of filter sections.
+  ComputeEchoEstimatePerFilterSection(render_buffer, filter_frequency_response);
+  // For each band, computes the number of filter sections that are needed for
+  // capturing 90% of the energy in the echo estimate.
+  ComputeActiveFilterSections(n_active_filter_sections);
+}
+
+void SignalDependentErleEstimator::UpdateCorrectionFactors(
+    rtc::ArrayView<const float> X2,
+    rtc::ArrayView<const float> Y2,
+    rtc::ArrayView<const float> E2,
+    rtc::ArrayView<const size_t> n_active_sections) {
+  constexpr float kX2BandEnergyThreshold = 44015068.0f;
+  constexpr float kSmthConstantDecreases = 0.1f;
+  constexpr float kSmthConstantIncreases = kSmthConstantDecreases / 2.f;
+  auto subband_powers = [](rtc::ArrayView<const float> power_spectrum,
+                           rtc::ArrayView<float> power_spectrum_subbands) {
+    for (size_t subband = 0; subband < kSubbands; ++subband) {
+      RTC_DCHECK_LE(kBandBoundaries[subband + 1], power_spectrum.size());
+      power_spectrum_subbands[subband] = std::accumulate(
+          power_spectrum.begin() + kBandBoundaries[subband],
+          power_spectrum.begin() + kBandBoundaries[subband + 1], 0.f);
+    }
+  };
+
+  std::array<float, kSubbands> X2_subbands, E2_subbands, Y2_subbands;
+  subband_powers(X2, X2_subbands);
+  subband_powers(E2, E2_subbands);
+  subband_powers(Y2, Y2_subbands);
+  std::array<size_t, kSubbands> idx_subbands;
+  for (size_t subband = 0; subband < kSubbands; ++subband) {
+    // When aggregating the number of active sections in the filter over the
+    // bands of a subband, we take the minimum of all of them. As an example,
+    // if for one of the bands the direct path is the main contributor to the
+    // final echo estimate, the direct path is considered to be the main
+    // contributor for the whole subband that contains that band. That
+    // aggregate number of sections is later used as the identifier of the
+    // erle estimator that needs to be updated.
+    RTC_DCHECK_LE(kBandBoundaries[subband + 1], n_active_sections.size());
+    idx_subbands[subband] = *std::min_element(
+        n_active_sections.begin() + kBandBoundaries[subband],
+        n_active_sections.begin() + kBandBoundaries[subband + 1]);
+  }
+
+  std::array<float, kSubbands> new_erle;
+  std::array<bool, kSubbands> is_erle_updated;
+  is_erle_updated.fill(false);
+  new_erle.fill(0.f);
+  for (size_t subband = 0; subband < kSubbands; ++subband) {
+    if (X2_subbands[subband] > kX2BandEnergyThreshold &&
+        E2_subbands[subband] > 0) {
+      new_erle[subband] = Y2_subbands[subband] / E2_subbands[subband];
+      RTC_DCHECK_GT(new_erle[subband], 0);
+      is_erle_updated[subband] = true;
+      ++num_updates_[subband];
+    }
+  }
+
+  for (size_t subband = 0; subband < kSubbands; ++subband) {
+    const size_t idx = idx_subbands[subband];
+    RTC_DCHECK_LT(idx, erle_estimators_.size());
+    float alpha = new_erle[subband] > erle_estimators_[idx][subband]
+                      ? kSmthConstantIncreases
+                      : kSmthConstantDecreases;
+    alpha = static_cast<float>(is_erle_updated[subband]) * alpha;
+    erle_estimators_[idx][subband] +=
+        alpha * (new_erle[subband] - erle_estimators_[idx][subband]);
+    erle_estimators_[idx][subband] = rtc::SafeClamp(
+        erle_estimators_[idx][subband], min_erle_, max_erle_[subband]);
+  }
+
+  for (size_t subband = 0; subband < kSubbands; ++subband) {
+    float alpha = new_erle[subband] > erle_ref_[subband]
+                      ? kSmthConstantIncreases
+                      : kSmthConstantDecreases;
+    alpha = static_cast<float>(is_erle_updated[subband]) * alpha;
+    erle_ref_[subband] += alpha * (new_erle[subband] - erle_ref_[subband]);
+    erle_ref_[subband] =
+        rtc::SafeClamp(erle_ref_[subband], min_erle_, max_erle_[subband]);
+  }
+
+  for (size_t subband = 0; subband < kSubbands; ++subband) {
+    constexpr int kNumUpdateThr = 50;
+    if (is_erle_updated[subband] && num_updates_[subband] > kNumUpdateThr) {
+      const size_t idx = idx_subbands[subband];
+      RTC_DCHECK_GT(erle_ref_[subband], 0.f);
+      // Computes the ratio between the erle that is updated only on signals
+      // sharing the same number of active filter sections and the reference
+      // erle that is updated using all the accumulated points.
+      float new_correction_factor =
+          erle_estimators_[idx][subband] / erle_ref_[subband];
+
+      correction_factors_[idx][subband] +=
+          0.1f * (new_correction_factor - correction_factors_[idx][subband]);
+    }
+  }
+}
+
+void SignalDependentErleEstimator::ComputeEchoEstimatePerFilterSection(
+    const RenderBuffer& render_buffer,
+    const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+        filter_frequency_response) {
+  const VectorBuffer& spectrum_render_buffer =
+      render_buffer.GetSpectrumBuffer();
+
+  RTC_DCHECK_EQ(S2_section_accum_.size() + 1,
+                section_boundaries_blocks_.size());
+  size_t idx_render = render_buffer.Position();
+  idx_render = spectrum_render_buffer.OffsetIndex(
+      idx_render, section_boundaries_blocks_[0]);
+
+  for (size_t section = 0; section < num_sections_; ++section) {
+    std::array<float, kFftLengthBy2Plus1> X2_section;
+    std::array<float, kFftLengthBy2Plus1> H2_section;
+    X2_section.fill(0.f);
+    H2_section.fill(0.f);
+    for (size_t block = section_boundaries_blocks_[section];
+         block < section_boundaries_blocks_[section + 1]; ++block) {
+      std::transform(X2_section.begin(), X2_section.end(),
+                     spectrum_render_buffer.buffer[idx_render].begin(),
+                     X2_section.begin(), std::plus<float>());
+      std::transform(H2_section.begin(), H2_section.end(),
+                     filter_frequency_response[block].begin(),
+                     H2_section.begin(), std::plus<float>());
+      idx_render = spectrum_render_buffer.IncIndex(idx_render);
+    }
+
+    std::transform(X2_section.begin(), X2_section.end(), H2_section.begin(),
+                   S2_section_accum_[section].begin(),
+                   std::multiplies<float>());
+  }
+
+  for (size_t section = 1; section < num_sections_; ++section) {
+    std::transform(S2_section_accum_[section - 1].begin(),
+                   S2_section_accum_[section - 1].end(),
+                   S2_section_accum_[section].begin(),
+                   S2_section_accum_[section].begin(), std::plus<float>());
+  }
+}
+
+void SignalDependentErleEstimator::ComputeActiveFilterSections(
+    rtc::ArrayView<size_t> number_active_filter_sections) const {
+  std::fill(number_active_filter_sections.begin(),
+            number_active_filter_sections.end(), 0);
+  for (size_t k = 0; k < kFftLengthBy2Plus1; ++k) {
+    size_t section = num_sections_;
+    float target = 0.9f * S2_section_accum_[num_sections_ - 1][k];
+    while (section > 0 && S2_section_accum_[section - 1][k] >= target) {
+      number_active_filter_sections[k] = --section;
+    }
+  }
+}
+}  // namespace webrtc
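The heart of the new estimator is the 90% search in ComputeActiveFilterSections(): for each frequency bin it finds the smallest filter prefix whose cumulative echo estimate already reaches 90% of the full-filter estimate. A single-bin standalone illustration (not the WebRTC code; S2 plays the role of one bin of S2_section_accum_ and is assumed non-negative and non-decreasing):

#include <cstddef>
#include <vector>

// Returns the index of the first section whose cumulative echo estimate
// reaches 90% of the full-filter estimate (0 means the first section alone
// already suffices).
size_t FirstSectionReaching90Percent(const std::vector<float>& S2) {
  if (S2.empty()) {
    return 0;
  }
  const size_t num_sections = S2.size();
  const float target = 0.9f * S2[num_sections - 1];
  size_t active = 0;
  size_t section = num_sections;
  while (section > 0 && S2[section - 1] >= target) {
    active = --section;
  }
  return active;
}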
diff --git a/modules/audio_processing/aec3/signal_dependent_erle_estimator.h b/modules/audio_processing/aec3/signal_dependent_erle_estimator.h
new file mode 100644
index 0000000..d8b56c2
--- /dev/null
+++ b/modules/audio_processing/aec3/signal_dependent_erle_estimator.h
@@ -0,0 +1,93 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_AUDIO_PROCESSING_AEC3_SIGNAL_DEPENDENT_ERLE_ESTIMATOR_H_
+#define MODULES_AUDIO_PROCESSING_AEC3_SIGNAL_DEPENDENT_ERLE_ESTIMATOR_H_
+
+#include <memory>
+#include <vector>
+
+#include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/aec3_common.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/logging/apm_data_dumper.h"
+
+namespace webrtc {
+
+// This class estimates the dependency of the Erle on the input signal. By
+// looking at the input signal, it estimates whether the current echo
+// estimate is due to the direct path or to a more reverberant acoustic path.
+// Based on that estimate, the average Erle that this class receives as an
+// input is refined.
+class SignalDependentErleEstimator {
+ public:
+  explicit SignalDependentErleEstimator(const EchoCanceller3Config& config);
+
+  ~SignalDependentErleEstimator();
+
+  void Reset();
+
+  // Returns the Erle per frequency band.
+  const std::array<float, kFftLengthBy2Plus1>& Erle() const { return erle_; }
+
+  // Updates the Erle estimate. The Erle that is passed as an input is required
+  // to be an estimation of the average Erle achieved by the linear filter.
+  void Update(const RenderBuffer& render_buffer,
+              const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+                  filter_frequency_response,
+              rtc::ArrayView<const float> X2,
+              rtc::ArrayView<const float> Y2,
+              rtc::ArrayView<const float> E2,
+              rtc::ArrayView<const float> average_erle,
+              bool converged_filter);
+
+  void Dump(const std::unique_ptr<ApmDataDumper>& data_dumper) const;
+
+  static constexpr size_t kSubbands = 6;
+
+ private:
+  void ComputeNumberOfActiveFilterSections(
+      const RenderBuffer& render_buffer,
+      const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+          filter_frequency_response,
+      rtc::ArrayView<size_t> n_active_filter_sections);
+
+  void UpdateCorrectionFactors(rtc::ArrayView<const float> X2,
+                               rtc::ArrayView<const float> Y2,
+                               rtc::ArrayView<const float> E2,
+                               rtc::ArrayView<const size_t> n_active_sections);
+
+  void ComputeEchoEstimatePerFilterSection(
+      const RenderBuffer& render_buffer,
+      const std::vector<std::array<float, kFftLengthBy2Plus1>>&
+          filter_frequency_response);
+
+  void ComputeActiveFilterSections(
+      rtc::ArrayView<size_t> number_active_filter_sections) const;
+
+  const float min_erle_;
+  const size_t num_sections_;
+  const size_t num_blocks_;
+  const size_t delay_headroom_blocks_;
+  const std::array<size_t, kFftLengthBy2Plus1> band_to_subband_;
+  const std::array<float, kSubbands> max_erle_;
+  const std::vector<size_t> section_boundaries_blocks_;
+  std::array<float, kFftLengthBy2Plus1> erle_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> S2_section_accum_;
+  std::vector<std::array<float, kSubbands>> erle_estimators_;
+  std::array<float, kSubbands> erle_ref_;
+  std::vector<std::array<float, kSubbands>> correction_factors_;
+  std::array<int, kSubbands> num_updates_;
+};
+
+}  // namespace webrtc
+
+#endif  // MODULES_AUDIO_PROCESSING_AEC3_SIGNAL_DEPENDENT_ERLE_ESTIMATOR_H_
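For reference, the per-bin refinement that Update() performs with the learned factors is just a clamped multiplicative correction; a sketch under the assumption that min_erle and max_erle stand for config.erle.min and the per-subband maximum:

#include <algorithm>

// For one frequency bin: the caller-provided average Erle is scaled by the
// correction factor learned for its (active sections, subband) pair and
// clamped to the configured range.
float RefineErle(float average_erle, float correction_factor, float min_erle,
                 float max_erle) {
  return std::min(std::max(average_erle * correction_factor, min_erle),
                  max_erle);
}

In the class itself the factor is correction_factors_[n_active_sections[k]][band_to_subband_[k]], i.e. it is indexed both by the number of active sections and by the subband the bin belongs to.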
diff --git a/modules/audio_processing/aec3/signal_dependent_erle_estimator_unittest.cc b/modules/audio_processing/aec3/signal_dependent_erle_estimator_unittest.cc
new file mode 100644
index 0000000..aec605f
--- /dev/null
+++ b/modules/audio_processing/aec3/signal_dependent_erle_estimator_unittest.cc
@@ -0,0 +1,155 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/aec3/signal_dependent_erle_estimator.h"
+
+#include <algorithm>
+#include <iostream>
+#include <string>
+
+#include "api/audio/echo_canceller3_config.h"
+#include "modules/audio_processing/aec3/render_buffer.h"
+#include "modules/audio_processing/aec3/render_delay_buffer.h"
+#include "rtc_base/strings/string_builder.h"
+#include "test/gtest.h"
+
+namespace webrtc {
+
+namespace {
+
+void GetActiveFrame(rtc::ArrayView<float> x) {
+  const std::array<float, kBlockSize> frame = {
+      7459.88, 17209.6, 17383,   20768.9, 16816.7, 18386.3, 4492.83, 9675.85,
+      6665.52, 14808.6, 9342.3,  7483.28, 19261.7, 4145.98, 1622.18, 13475.2,
+      7166.32, 6856.61, 21937,   7263.14, 9569.07, 14919,   8413.32, 7551.89,
+      7848.65, 6011.27, 13080.6, 15865.2, 12656,   17459.6, 4263.93, 4503.03,
+      9311.79, 21095.8, 12657.9, 13906.6, 19267.2, 11338.1, 16828.9, 11501.6,
+      11405,   15031.4, 14541.6, 19765.5, 18346.3, 19350.2, 3157.47, 18095.8,
+      1743.68, 21328.2, 19727.5, 7295.16, 10332.4, 11055.5, 20107.4, 14708.4,
+      12416.2, 16434,   2454.69, 9840.8,  6867.23, 1615.75, 6059.9,  8394.19};
+  RTC_DCHECK_GE(x.size(), frame.size());
+  std::copy(frame.begin(), frame.end(), x.begin());
+}
+
+class TestInputs {
+ public:
+  explicit TestInputs(const EchoCanceller3Config& cfg);
+  ~TestInputs();
+  const RenderBuffer& GetRenderBuffer() { return *render_buffer_; }
+  rtc::ArrayView<const float> GetX2() { return X2_; }
+  rtc::ArrayView<const float> GetY2() { return Y2_; }
+  rtc::ArrayView<const float> GetE2() { return E2_; }
+  std::vector<std::array<float, kFftLengthBy2Plus1>> GetH2() { return H2_; }
+  void Update();
+
+ private:
+  void UpdateCurrentPowerSpectra();
+  int n_ = 0;
+  std::unique_ptr<RenderDelayBuffer> render_delay_buffer_;
+  RenderBuffer* render_buffer_;
+  std::array<float, kFftLengthBy2Plus1> X2_;
+  std::array<float, kFftLengthBy2Plus1> Y2_;
+  std::array<float, kFftLengthBy2Plus1> E2_;
+  std::vector<std::array<float, kFftLengthBy2Plus1>> H2_;
+  std::vector<std::vector<float>> x_;
+};
+
+TestInputs::TestInputs(const EchoCanceller3Config& cfg)
+    : render_delay_buffer_(RenderDelayBuffer::Create2(cfg, 1)),
+      H2_(cfg.filter.main.length_blocks),
+      x_(1, std::vector<float>(kBlockSize, 0.f)) {
+  render_delay_buffer_->SetDelay(4);
+  render_buffer_ = render_delay_buffer_->GetRenderBuffer();
+  for (auto& H : H2_) {
+    H.fill(0.f);
+  }
+  H2_[0].fill(1.0f);
+}
+
+TestInputs::~TestInputs() = default;
+
+void TestInputs::Update() {
+  if (n_ % 2 == 0) {
+    std::fill(x_[0].begin(), x_[0].end(), 0.f);
+  } else {
+    GetActiveFrame(x_[0]);
+  }
+
+  render_delay_buffer_->Insert(x_);
+  render_delay_buffer_->PrepareCaptureProcessing();
+  UpdateCurrentPowerSpectra();
+  ++n_;
+}
+
+void TestInputs::UpdateCurrentPowerSpectra() {
+  const VectorBuffer& spectrum_render_buffer =
+      render_buffer_->GetSpectrumBuffer();
+  size_t idx = render_buffer_->Position();
+  size_t prev_idx = spectrum_render_buffer.OffsetIndex(idx, 1);
+  auto& X2 = spectrum_render_buffer.buffer[idx];
+  auto& X2_prev = spectrum_render_buffer.buffer[prev_idx];
+  std::copy(X2.begin(), X2.end(), X2_.begin());
+  RTC_DCHECK_EQ(X2.size(), Y2_.size());
+  for (size_t k = 0; k < X2.size(); ++k) {
+    E2_[k] = 0.01f * X2_prev[k];
+    Y2_[k] = X2[k] + E2_[k];
+  }
+}
+
+}  // namespace
+
+TEST(SignalDependentErleEstimator, SweepSettings) {
+  EchoCanceller3Config cfg;
+  size_t max_length_blocks = 50;
+  for (size_t blocks = 0; blocks < max_length_blocks; blocks = blocks + 10) {
+    for (size_t delay_headroom = 0; delay_headroom < 5; ++delay_headroom) {
+      for (size_t num_sections = 2; num_sections < max_length_blocks;
+           ++num_sections) {
+        cfg.filter.main.length_blocks = blocks;
+        cfg.filter.main_initial.length_blocks =
+            std::min(cfg.filter.main_initial.length_blocks, blocks);
+        cfg.delay.delay_headroom_blocks = delay_headroom;
+        cfg.erle.num_sections = num_sections;
+        if (EchoCanceller3Config::Validate(&cfg)) {
+          SignalDependentErleEstimator s(cfg);
+          std::array<float, kFftLengthBy2Plus1> average_erle;
+          average_erle.fill(cfg.erle.max_l);
+          TestInputs inputs(cfg);
+          for (size_t n = 0; n < 10; ++n) {
+            inputs.Update();
+            s.Update(inputs.GetRenderBuffer(), inputs.GetH2(), inputs.GetX2(),
+                     inputs.GetY2(), inputs.GetE2(), average_erle, true);
+          }
+        }
+      }
+    }
+  }
+}
+
+TEST(SignalDependentErleEstimator, LongerRun) {
+  EchoCanceller3Config cfg;
+  cfg.filter.main.length_blocks = 2;
+  cfg.filter.main_initial.length_blocks = 1;
+  cfg.delay.delay_headroom_blocks = 0;
+  cfg.delay.hysteresis_limit_1_blocks = 0;
+  cfg.erle.num_sections = 2;
+  EXPECT_EQ(EchoCanceller3Config::Validate(&cfg), true);
+  std::array<float, kFftLengthBy2Plus1> average_erle;
+  average_erle.fill(cfg.erle.max_l);
+  SignalDependentErleEstimator s(cfg);
+  TestInputs inputs(cfg);
+  for (size_t n = 0; n < 200; ++n) {
+    inputs.Update();
+    s.Update(inputs.GetRenderBuffer(), inputs.GetH2(), inputs.GetX2(),
+             inputs.GetY2(), inputs.GetE2(), average_erle, true);
+  }
+}
+
+}  // namespace webrtc
diff --git a/modules/audio_processing/aec3/subband_erle_estimator.cc b/modules/audio_processing/aec3/subband_erle_estimator.cc
index 2cb5acc..9453e57 100644
--- a/modules/audio_processing/aec3/subband_erle_estimator.cc
+++ b/modules/audio_processing/aec3/subband_erle_estimator.cc
@@ -11,12 +11,8 @@
 #include "modules/audio_processing/aec3/subband_erle_estimator.h"
 
 #include <algorithm>
-#include <memory>
+#include <functional>
 
-#include "absl/types/optional.h"
-#include "api/array_view.h"
-#include "modules/audio_processing/aec3/aec3_common.h"
-#include "modules/audio_processing/logging/apm_data_dumper.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/numerics/safe_minmax.h"
 #include "system_wrappers/include/field_trial.h"
@@ -24,23 +20,29 @@
 namespace webrtc {
 
 namespace {
-constexpr int kPointsToAccumulate = 6;
+
 constexpr float kX2BandEnergyThreshold = 44015068.0f;
-constexpr int kErleHold = 100;
-constexpr int kBlocksForOnsetDetection = kErleHold + 150;
+constexpr int kBlocksToHoldErle = 100;
+constexpr int kBlocksForOnsetDetection = kBlocksToHoldErle + 150;
+constexpr int kPointsToAccumulate = 6;
 
 bool EnableAdaptErleOnLowRender() {
   return !field_trial::IsEnabled("WebRTC-Aec3AdaptErleOnLowRenderKillSwitch");
 }
 
+std::array<float, kFftLengthBy2Plus1> SetMaxErleBands(float max_erle_l,
+                                                      float max_erle_h) {
+  std::array<float, kFftLengthBy2Plus1> max_erle;
+  std::fill(max_erle.begin(), max_erle.begin() + kFftLengthBy2 / 2, max_erle_l);
+  std::fill(max_erle.begin() + kFftLengthBy2 / 2, max_erle.end(), max_erle_h);
+  return max_erle;
+}
+
 }  // namespace
 
-SubbandErleEstimator::SubbandErleEstimator(float min_erle,
-                                           float max_erle_lf,
-                                           float max_erle_hf)
-    : min_erle_(min_erle),
-      max_erle_lf_(max_erle_lf),
-      max_erle_hf_(max_erle_hf),
+SubbandErleEstimator::SubbandErleEstimator(const EchoCanceller3Config& config)
+    : min_erle_(config.erle.min),
+      max_erle_(SetMaxErleBands(config.erle.max_l, config.erle.max_h)),
       adapt_on_low_render_(EnableAdaptErleOnLowRender()) {
   Reset();
 }
@@ -50,8 +52,9 @@
 void SubbandErleEstimator::Reset() {
   erle_.fill(min_erle_);
   erle_onsets_.fill(min_erle_);
-  hold_counters_.fill(0);
   coming_onset_.fill(true);
+  hold_counters_.fill(0);
+  ResetAccumulatedSpectra();
 }
 
 void SubbandErleEstimator::Update(rtc::ArrayView<const float> X2,
@@ -63,10 +66,8 @@
     // Note that the use of the converged_filter flag already imposes
     // a minimum on the erle that can be estimated, as that flag would
     // be false if the filter is performing poorly.
-    constexpr size_t kFftLengthBy4 = kFftLengthBy2 / 2;
-    UpdateBands(X2, Y2, E2, 1, kFftLengthBy4, max_erle_lf_, onset_detection);
-    UpdateBands(X2, Y2, E2, kFftLengthBy4, kFftLengthBy2, max_erle_hf_,
-                onset_detection);
+    UpdateAccumulatedSpectra(X2, Y2, E2);
+    UpdateBands(onset_detection);
   }
 
   if (onset_detection) {
@@ -79,61 +80,53 @@
 
 void SubbandErleEstimator::Dump(
     const std::unique_ptr<ApmDataDumper>& data_dumper) const {
-  data_dumper->DumpRaw("aec3_erle", Erle());
   data_dumper->DumpRaw("aec3_erle_onset", ErleOnsets());
 }
 
-void SubbandErleEstimator::UpdateBands(rtc::ArrayView<const float> X2,
-                                       rtc::ArrayView<const float> Y2,
-                                       rtc::ArrayView<const float> E2,
-                                       size_t start,
-                                       size_t stop,
-                                       float max_erle,
-                                       bool onset_detection) {
-  auto erle_band_update = [](float erle_band, float new_erle,
-                             bool low_render_energy, float alpha_inc,
-                             float alpha_dec, float min_erle, float max_erle) {
-    if (new_erle < erle_band && low_render_energy) {
-      // Decreases are not allowed if low render energy signals were used for
-      // the erle computation.
-      return erle_band;
+void SubbandErleEstimator::UpdateBands(bool onset_detection) {
+  std::array<float, kFftLengthBy2> new_erle;
+  std::array<bool, kFftLengthBy2> is_erle_updated;
+  is_erle_updated.fill(false);
+
+  for (size_t k = 1; k < kFftLengthBy2; ++k) {
+    if (accum_spectra_.num_points_[k] == kPointsToAccumulate &&
+        accum_spectra_.E2_[k] > 0.f) {
+      new_erle[k] = accum_spectra_.Y2_[k] / accum_spectra_.E2_[k];
+      is_erle_updated[k] = true;
     }
-    float alpha = new_erle > erle_band ? alpha_inc : alpha_dec;
-    float erle_band_out = erle_band;
-    erle_band_out = erle_band + alpha * (new_erle - erle_band);
-    erle_band_out = rtc::SafeClamp(erle_band_out, min_erle, max_erle);
-    return erle_band_out;
-  };
+  }
 
-  for (size_t k = start; k < stop; ++k) {
-    if (adapt_on_low_render_ || X2[k] > kX2BandEnergyThreshold) {
-      bool low_render_energy = false;
-      absl::optional<float> new_erle = instantaneous_erle_.Update(
-          X2[k], Y2[k], E2[k], k, &low_render_energy);
-      if (new_erle) {
-        RTC_DCHECK(adapt_on_low_render_ || !low_render_energy);
-        if (onset_detection && !low_render_energy) {
-          if (coming_onset_[k]) {
-            coming_onset_[k] = false;
-            erle_onsets_[k] = erle_band_update(
-                erle_onsets_[k], new_erle.value(), low_render_energy, 0.15f,
-                0.3f, min_erle_, max_erle);
-          }
-          hold_counters_[k] = kBlocksForOnsetDetection;
+  if (onset_detection) {
+    for (size_t k = 1; k < kFftLengthBy2; ++k) {
+      if (is_erle_updated[k] && !accum_spectra_.low_render_energy_[k]) {
+        if (coming_onset_[k]) {
+          coming_onset_[k] = false;
+          float alpha = new_erle[k] < erle_onsets_[k] ? 0.3f : 0.15f;
+          erle_onsets_[k] = rtc::SafeClamp(
+              erle_onsets_[k] + alpha * (new_erle[k] - erle_onsets_[k]),
+              min_erle_, max_erle_[k]);
         }
-
-        erle_[k] =
-            erle_band_update(erle_[k], new_erle.value(), low_render_energy,
-                             0.05f, 0.1f, min_erle_, max_erle);
+        hold_counters_[k] = kBlocksForOnsetDetection;
       }
     }
   }
+
+  for (size_t k = 1; k < kFftLengthBy2; ++k) {
+    if (is_erle_updated[k]) {
+      float alpha = 0.05f;
+      if (new_erle[k] < erle_[k]) {
+        alpha = accum_spectra_.low_render_energy_[k] ? 0.f : 0.1f;
+      }
+      erle_[k] = rtc::SafeClamp(erle_[k] + alpha * (new_erle[k] - erle_[k]),
+                                min_erle_, max_erle_[k]);
+    }
+  }
 }
 
 void SubbandErleEstimator::DecreaseErlePerBandForLowRenderSignals() {
   for (size_t k = 1; k < kFftLengthBy2; ++k) {
     hold_counters_[k]--;
-    if (hold_counters_[k] <= (kBlocksForOnsetDetection - kErleHold)) {
+    if (hold_counters_[k] <= (kBlocksForOnsetDetection - kBlocksToHoldErle)) {
       if (erle_[k] > erle_onsets_[k]) {
         erle_[k] = std::max(erle_onsets_[k], 0.97f * erle_[k]);
         RTC_DCHECK_LE(min_erle_, erle_[k]);
@@ -146,43 +139,55 @@
   }
 }
 
-SubbandErleEstimator::ErleInstantaneous::ErleInstantaneous() {
-  Reset();
+void SubbandErleEstimator::ResetAccumulatedSpectra() {
+  accum_spectra_.Y2_.fill(0.f);
+  accum_spectra_.E2_.fill(0.f);
+  accum_spectra_.num_points_.fill(0);
+  accum_spectra_.low_render_energy_.fill(false);
 }
 
-SubbandErleEstimator::ErleInstantaneous::~ErleInstantaneous() = default;
-
-absl::optional<float> SubbandErleEstimator::ErleInstantaneous::Update(
-    float X2,
-    float Y2,
-    float E2,
-    size_t band,
-    bool* low_render_energy) {
-  absl::optional<float> erle_instantaneous = absl::nullopt;
-  RTC_DCHECK_LT(band, kFftLengthBy2Plus1);
-  Y2_acum_[band] += Y2;
-  E2_acum_[band] += E2;
-  low_render_energy_[band] =
-      low_render_energy_[band] || X2 < kX2BandEnergyThreshold;
-  if (++num_points_[band] == kPointsToAccumulate) {
-    if (E2_acum_[band]) {
-      erle_instantaneous = Y2_acum_[band] / E2_acum_[band];
+void SubbandErleEstimator::UpdateAccumulatedSpectra(
+    rtc::ArrayView<const float> X2,
+    rtc::ArrayView<const float> Y2,
+    rtc::ArrayView<const float> E2) {
+  auto& st = accum_spectra_;
+  if (adapt_on_low_render_) {
+    if (st.num_points_[0] == kPointsToAccumulate) {
+      st.num_points_[0] = 0;
+      st.Y2_.fill(0.f);
+      st.E2_.fill(0.f);
+      st.low_render_energy_.fill(false);
     }
-    *low_render_energy = low_render_energy_[band];
-    num_points_[band] = 0;
-    Y2_acum_[band] = 0.f;
-    E2_acum_[band] = 0.f;
-    low_render_energy_[band] = false;
+    std::transform(Y2.begin(), Y2.end(), st.Y2_.begin(), st.Y2_.begin(),
+                   std::plus<float>());
+    std::transform(E2.begin(), E2.end(), st.E2_.begin(), st.E2_.begin(),
+                   std::plus<float>());
+
+    for (size_t k = 0; k < X2.size(); ++k) {
+      st.low_render_energy_[k] =
+          st.low_render_energy_[k] || X2[k] < kX2BandEnergyThreshold;
+    }
+    st.num_points_[0]++;
+    st.num_points_.fill(st.num_points_[0]);
+
+  } else {
+    // The update is always done using high render energy signals and
+    // therefore the field accum_spectra_.low_render_energy_ does not need to
+    // be modified.
+    for (size_t k = 0; k < X2.size(); ++k) {
+      if (X2[k] > kX2BandEnergyThreshold) {
+        if (st.num_points_[k] == kPointsToAccumulate) {
+          st.Y2_[k] = 0.f;
+          st.E2_[k] = 0.f;
+          st.num_points_[k] = 0;
+        }
+        st.Y2_[k] += Y2[k];
+        st.E2_[k] += E2[k];
+        st.num_points_[k]++;
+      }
+      RTC_DCHECK_EQ(st.low_render_energy_[k], false);
+    }
   }
-
-  return erle_instantaneous;
-}
-
-void SubbandErleEstimator::ErleInstantaneous::Reset() {
-  Y2_acum_.fill(0.f);
-  E2_acum_.fill(0.f);
-  low_render_energy_.fill(false);
-  num_points_.fill(0);
 }
 
 }  // namespace webrtc
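A simplified per-band sketch of the restructured update (not the WebRTC code, and the accumulator reset is done here immediately after use rather than lazily as in UpdateAccumulatedSpectra()): kPointsToAccumulate spectra are gathered per band before the ERLE is touched, and the smoothing is asymmetric, 0.05 toward larger values, 0.1 toward smaller ones, and no decrease at all if low-render-energy blocks entered the accumulation.

#include <algorithm>

struct BandAccumulatorSketch {
  float Y2_sum = 0.f;
  float E2_sum = 0.f;
  int num_points = 0;
  bool low_render_energy = false;
};

// Applies one accumulation window to the per-band ERLE and returns the
// updated value; the ERLE is left untouched until 6 points were gathered.
float UpdateBandErle(BandAccumulatorSketch& acc, float erle, float min_erle,
                     float max_erle) {
  constexpr int kPointsToAccumulate = 6;
  if (acc.num_points != kPointsToAccumulate || acc.E2_sum <= 0.f) {
    return erle;
  }
  const float new_erle = acc.Y2_sum / acc.E2_sum;
  float alpha = 0.05f;
  if (new_erle < erle) {
    // Decreases are blocked when low render energy entered the accumulation.
    alpha = acc.low_render_energy ? 0.f : 0.1f;
  }
  erle = std::min(std::max(erle + alpha * (new_erle - erle), min_erle),
                  max_erle);
  acc = BandAccumulatorSketch();  // Start a fresh accumulation window.
  return erle;
}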
diff --git a/modules/audio_processing/aec3/subband_erle_estimator.h b/modules/audio_processing/aec3/subband_erle_estimator.h
index 7693b6a..b9862db 100644
--- a/modules/audio_processing/aec3/subband_erle_estimator.h
+++ b/modules/audio_processing/aec3/subband_erle_estimator.h
@@ -14,9 +14,10 @@
 #include <stddef.h>
 #include <array>
 #include <memory>
+#include <vector>
 
-#include "absl/types/optional.h"
 #include "api/array_view.h"
+#include "api/audio/echo_canceller3_config.h"
 #include "modules/audio_processing/aec3/aec3_common.h"
 #include "modules/audio_processing/logging/apm_data_dumper.h"
 
@@ -25,7 +26,7 @@
 // Estimates the echo return loss enhancement for each frequency subband.
 class SubbandErleEstimator {
  public:
-  SubbandErleEstimator(float min_erle, float max_erle_lf, float max_erle_hf);
+  explicit SubbandErleEstimator(const EchoCanceller3Config& config);
   ~SubbandErleEstimator();
 
   // Resets the ERLE estimator.
@@ -42,55 +43,35 @@
   const std::array<float, kFftLengthBy2Plus1>& Erle() const { return erle_; }
 
   // Returns the ERLE estimate at onsets.
-  const std::array<float, kFftLengthBy2Plus1>& ErleOnsets() const {
-    return erle_onsets_;
-  }
+  rtc::ArrayView<const float> ErleOnsets() const { return erle_onsets_; }
 
   void Dump(const std::unique_ptr<ApmDataDumper>& data_dumper) const;
 
  private:
-  void UpdateBands(rtc::ArrayView<const float> X2,
-                   rtc::ArrayView<const float> Y2,
-                   rtc::ArrayView<const float> E2,
-                   size_t start,
-                   size_t stop,
-                   float max_erle,
-                   bool onset_detection);
-  void DecreaseErlePerBandForLowRenderSignals();
-
-  class ErleInstantaneous {
-   public:
-    ErleInstantaneous();
-    ~ErleInstantaneous();
-    // Updates the ERLE for a band with a new block. Returns absl::nullopt
-    // if not enough points were accumulated for doing the estimation,
-    // otherwise, it returns the ERLE. When the ERLE is returned, the
-    // low_render_energy flag contains information on whether the estimation was
-    // done using low level render signals.
-    absl::optional<float> Update(float X2,
-                                 float Y2,
-                                 float E2,
-                                 size_t band,
-                                 bool* low_render_energy);
-    // Resets the ERLE estimator to its initial state.
-    void Reset();
-
-   private:
-    std::array<float, kFftLengthBy2Plus1> Y2_acum_;
-    std::array<float, kFftLengthBy2Plus1> E2_acum_;
+  struct AccumulatedSpectra {
+    std::array<float, kFftLengthBy2Plus1> Y2_;
+    std::array<float, kFftLengthBy2Plus1> E2_;
     std::array<bool, kFftLengthBy2Plus1> low_render_energy_;
     std::array<int, kFftLengthBy2Plus1> num_points_;
   };
 
-  ErleInstantaneous instantaneous_erle_;
+  void UpdateAccumulatedSpectra(rtc::ArrayView<const float> X2,
+                                rtc::ArrayView<const float> Y2,
+                                rtc::ArrayView<const float> E2);
+
+  void ResetAccumulatedSpectra();
+
+  void UpdateBands(bool onset_detection);
+  void DecreaseErlePerBandForLowRenderSignals();
+
+  const float min_erle_;
+  const std::array<float, kFftLengthBy2Plus1> max_erle_;
+  const bool adapt_on_low_render_;
+  AccumulatedSpectra accum_spectra_;
   std::array<float, kFftLengthBy2Plus1> erle_;
   std::array<float, kFftLengthBy2Plus1> erle_onsets_;
   std::array<bool, kFftLengthBy2Plus1> coming_onset_;
   std::array<int, kFftLengthBy2Plus1> hold_counters_;
-  const float min_erle_;
-  const float max_erle_lf_;
-  const float max_erle_hf_;
-  const bool adapt_on_low_render_;
 };
 
 }  // namespace webrtc
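
The constructor now receives the whole EchoCanceller3Config instead of three floats. As a hedged sketch of one plausible mapping (the actual mapping lives in the .cc file and is not reproduced here), the per-band ERLE ceiling could be filled from the low-band and high-band maxima in the config; the band split index below is an assumption made only for the sketch.

#include <algorithm>
#include <array>
#include <cstddef>

namespace erle_sketch {

constexpr size_t kNumBands = 65;  // e.g. kFftLengthBy2Plus1

// Builds a per-band ERLE ceiling: low bands get max_erle_lf, high bands get
// max_erle_hf.
std::array<float, kNumBands> BuildMaxErlePerBand(float max_erle_lf,
                                                 float max_erle_hf) {
  constexpr size_t kLowBandLimit = kNumBands / 2;  // assumed split point
  std::array<float, kNumBands> max_erle;
  std::fill(max_erle.begin(), max_erle.begin() + kLowBandLimit, max_erle_lf);
  std::fill(max_erle.begin() + kLowBandLimit, max_erle.end(), max_erle_hf);
  return max_erle;
}

}  // namespace erle_sketch
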
diff --git a/modules/audio_processing/aec3/suppression_gain.cc b/modules/audio_processing/aec3/suppression_gain.cc
index 88cfc0a..c6d2bf6 100644
--- a/modules/audio_processing/aec3/suppression_gain.cc
+++ b/modules/audio_processing/aec3/suppression_gain.cc
@@ -419,7 +419,7 @@
   // Detect strong active nearend if the nearend is sufficiently stronger than
   // the echo and the nearend noise.
   if ((!initial_state || use_during_initial_phase_) &&
-      ne_sum > enr_threshold_ * echo_sum &&
+      echo_sum < enr_threshold_ * ne_sum &&
       ne_sum > snr_threshold_ * noise_sum) {
     if (++trigger_counter_ >= trigger_threshold_) {
       // After a period of strong active nearend activity, flag nearend mode.
@@ -432,7 +432,7 @@
   }
 
   // Exit nearend-state early at strong echo.
-  if (ne_sum < enr_exit_threshold_ * echo_sum &&
+  if (echo_sum > enr_exit_threshold_ * ne_sum &&
       echo_sum > snr_threshold_ * noise_sum) {
     hold_counter_ = 0;
   }
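
The two comparisons above are rewritten so that the threshold now scales the nearend sum instead of the echo sum, which inverts the meaning of enr_threshold and enr_exit_threshold relative to the previous code. A standalone sketch of the resulting predicates, with the thresholds supplied by the caller:

// Returns true when the nearend clearly dominates: the echo is below
// enr_threshold times the nearend power and the nearend exceeds the noise by
// snr_threshold.
bool StrongActiveNearend(float ne_sum, float echo_sum, float noise_sum,
                         float enr_threshold, float snr_threshold) {
  return echo_sum < enr_threshold * ne_sum &&
         ne_sum > snr_threshold * noise_sum;
}

// Returns true when a strong echo should force an early exit from nearend
// mode.
bool StrongEcho(float ne_sum, float echo_sum, float noise_sum,
                float enr_exit_threshold, float snr_threshold) {
  return echo_sum > enr_exit_threshold * ne_sum &&
         echo_sum > snr_threshold * noise_sum;
}
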
diff --git a/modules/audio_processing/agc2/BUILD.gn b/modules/audio_processing/agc2/BUILD.gn
index 18f2d78..5431a15 100644
--- a/modules/audio_processing/agc2/BUILD.gn
+++ b/modules/audio_processing/agc2/BUILD.gn
@@ -27,6 +27,7 @@
     ":gain_applier",
     ":noise_level_estimator",
     ":rnn_vad_with_level",
+    "..:api",
     "..:apm_logging",
     "..:audio_frame_view",
     "../../../api:array_view",
@@ -58,6 +59,7 @@
     ":gain_applier",
     ":noise_level_estimator",
     ":rnn_vad_with_level",
+    "..:api",
     "..:apm_logging",
     "..:audio_frame_view",
     "../../../api:array_view",
@@ -190,8 +192,8 @@
     "../../../api:array_view",
     "../../../common_audio",
     "../../../rtc_base:checks",
+    "../../../rtc_base:gunit_helpers",
     "../../../rtc_base:rtc_base_approved",
-    "../../../rtc_base:rtc_base_tests_utils",
   ]
 }
 
@@ -202,7 +204,7 @@
   ]
   deps = [
     ":biquad_filter",
-    "../../../rtc_base:rtc_base_tests_utils",
+    "../../../rtc_base:gunit_helpers",
   ]
 }
 
@@ -230,8 +232,8 @@
     "../../../api:array_view",
     "../../../common_audio",
     "../../../rtc_base:checks",
+    "../../../rtc_base:gunit_helpers",
     "../../../rtc_base:rtc_base_approved",
-    "../../../rtc_base:rtc_base_tests_utils",
     "../../../system_wrappers:metrics",
     "//third_party/abseil-cpp/absl/memory",
   ]
@@ -252,8 +254,20 @@
     "..:audio_frame_view",
     "../../../api:array_view",
     "../../../rtc_base:checks",
+    "../../../rtc_base:gunit_helpers",
     "../../../rtc_base:rtc_base_approved",
-    "../../../rtc_base:rtc_base_tests_utils",
+  ]
+}
+
+rtc_source_set("rnn_vad_with_level_unittests") {
+  testonly = true
+  sources = [
+    "vad_with_level_unittest.cc",
+  ]
+  deps = [
+    ":rnn_vad_with_level",
+    "..:audio_frame_view",
+    "../../../rtc_base:gunit_helpers",
   ]
 }
 
diff --git a/modules/audio_processing/agc2/adaptive_agc.cc b/modules/audio_processing/agc2/adaptive_agc.cc
index 795b8b5..a5d3608 100644
--- a/modules/audio_processing/agc2/adaptive_agc.cc
+++ b/modules/audio_processing/agc2/adaptive_agc.cc
@@ -26,8 +26,12 @@
 }
 
 AdaptiveAgc::AdaptiveAgc(ApmDataDumper* apm_data_dumper,
-                         float extra_saturation_margin_db)
-    : speech_level_estimator_(apm_data_dumper, extra_saturation_margin_db),
+                         const AudioProcessing::Config::GainController2& config)
+    : speech_level_estimator_(
+          apm_data_dumper,
+          config.adaptive_digital.level_estimator,
+          config.adaptive_digital.use_saturation_protector,
+          config.adaptive_digital.extra_saturation_margin_db),
       gain_applier_(apm_data_dumper),
       apm_data_dumper_(apm_data_dumper),
       noise_level_estimator_(apm_data_dumper) {
@@ -44,9 +48,9 @@
                             signal_with_levels.vad_result.speech_probability);
   apm_data_dumper_->DumpRaw("agc2_vad_rms_dbfs",
                             signal_with_levels.vad_result.speech_rms_dbfs);
-
   apm_data_dumper_->DumpRaw("agc2_vad_peak_dbfs",
                             signal_with_levels.vad_result.speech_peak_dbfs);
+
   speech_level_estimator_.UpdateEstimation(signal_with_levels.vad_result);
 
   signal_with_levels.input_level_dbfs =
@@ -68,7 +72,6 @@
 
   // The gain applier applies the gain.
   gain_applier_.Process(signal_with_levels);
-  ;
 }
 
 void AdaptiveAgc::Reset() {
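
AdaptiveAgc is now constructed from the full GainController2 config rather than a single saturation margin. A hedged usage sketch, assuming a WebRTC build environment and only the fields shown in this change:

#include "modules/audio_processing/agc2/adaptive_agc.h"
#include "modules/audio_processing/include/audio_processing.h"
#include "modules/audio_processing/logging/apm_data_dumper.h"

void MakeAdaptiveAgcSketch() {
  webrtc::ApmDataDumper data_dumper(/*instance_index=*/0);
  webrtc::AudioProcessing::Config::GainController2 config;
  config.adaptive_digital.enabled = true;
  config.adaptive_digital.level_estimator =
      webrtc::AudioProcessing::Config::GainController2::kPeak;
  config.adaptive_digital.use_saturation_protector = true;
  config.adaptive_digital.extra_saturation_margin_db = 2.f;
  webrtc::AdaptiveAgc agc(&data_dumper, config);
  // agc.Process(...) is then fed float frames as before.
}
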
diff --git a/modules/audio_processing/agc2/adaptive_agc.h b/modules/audio_processing/agc2/adaptive_agc.h
index 6c0917a..16c0082 100644
--- a/modules/audio_processing/agc2/adaptive_agc.h
+++ b/modules/audio_processing/agc2/adaptive_agc.h
@@ -16,6 +16,7 @@
 #include "modules/audio_processing/agc2/noise_level_estimator.h"
 #include "modules/audio_processing/agc2/vad_with_level.h"
 #include "modules/audio_processing/include/audio_frame_view.h"
+#include "modules/audio_processing/include/audio_processing.h"
 
 namespace webrtc {
 class ApmDataDumper;
@@ -23,7 +24,8 @@
 class AdaptiveAgc {
  public:
   explicit AdaptiveAgc(ApmDataDumper* apm_data_dumper);
-  AdaptiveAgc(ApmDataDumper* apm_data_dumper, float extra_saturation_margin_db);
+  AdaptiveAgc(ApmDataDumper* apm_data_dumper,
+              const AudioProcessing::Config::GainController2& config);
   ~AdaptiveAgc();
 
   void Process(AudioFrameView<float> float_frame, float last_audio_level);
diff --git a/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc b/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc
index 138faec..8640324 100644
--- a/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc
+++ b/modules/audio_processing/agc2/adaptive_mode_level_estimator.cc
@@ -19,13 +19,20 @@
 
 AdaptiveModeLevelEstimator::AdaptiveModeLevelEstimator(
     ApmDataDumper* apm_data_dumper)
-    : saturation_protector_(apm_data_dumper),
+    : level_estimator_(
+          AudioProcessing::Config::GainController2::LevelEstimator::kRms),
+      use_saturation_protector_(true),
+      saturation_protector_(apm_data_dumper),
       apm_data_dumper_(apm_data_dumper) {}
 
 AdaptiveModeLevelEstimator::AdaptiveModeLevelEstimator(
     ApmDataDumper* apm_data_dumper,
+    AudioProcessing::Config::GainController2::LevelEstimator level_estimator,
+    bool use_saturation_protector,
     float extra_saturation_margin_db)
-    : saturation_protector_(apm_data_dumper, extra_saturation_margin_db),
+    : level_estimator_(level_estimator),
+      use_saturation_protector_(use_saturation_protector),
+      saturation_protector_(apm_data_dumper, extra_saturation_margin_db),
       apm_data_dumper_(apm_data_dumper) {}
 
 void AdaptiveModeLevelEstimator::UpdateEstimation(
@@ -49,20 +56,38 @@
 
   const float leak_factor = buffer_is_full ? kFullBufferLeakFactor : 1.f;
 
+  // Read speech level estimation.
+  float speech_level_dbfs = 0.f;
+  using LevelEstimatorType =
+      AudioProcessing::Config::GainController2::LevelEstimator;
+  switch (level_estimator_) {
+    case LevelEstimatorType::kRms:
+      speech_level_dbfs = vad_data.speech_rms_dbfs;
+      break;
+    case LevelEstimatorType::kPeak:
+      speech_level_dbfs = vad_data.speech_peak_dbfs;
+      break;
+  }
+
+  // Update speech level estimation.
   estimate_numerator_ = estimate_numerator_ * leak_factor +
-                        vad_data.speech_rms_dbfs * vad_data.speech_probability;
+                        speech_level_dbfs * vad_data.speech_probability;
   estimate_denominator_ =
       estimate_denominator_ * leak_factor + vad_data.speech_probability;
-
   last_estimate_with_offset_dbfs_ = estimate_numerator_ / estimate_denominator_;
 
-  saturation_protector_.UpdateMargin(vad_data, last_estimate_with_offset_dbfs_);
-  DebugDumpEstimate();
+  if (use_saturation_protector_) {
+    saturation_protector_.UpdateMargin(vad_data,
+                                       last_estimate_with_offset_dbfs_);
+    DebugDumpEstimate();
+  }
 }
 
 float AdaptiveModeLevelEstimator::LatestLevelEstimate() const {
   return rtc::SafeClamp<float>(
-      last_estimate_with_offset_dbfs_ + saturation_protector_.LastMargin(),
+      last_estimate_with_offset_dbfs_ +
+          (use_saturation_protector_ ? saturation_protector_.LastMargin()
+                                     : 0.f),
       -90.f, 30.f);
 }
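
The estimator above maintains the speech level as a probability-weighted running ratio, discounted by a leak factor once the averaging buffer is full. A standalone sketch of that update (not the production class; the caller is assumed to supply the leak factor and a nonzero speech probability on the first update):

struct WeightedLevelEstimate {
  float numerator = 0.f;
  float denominator = 0.f;

  // level_dbfs: instantaneous speech level (RMS or peak, per configuration).
  // speech_probability: VAD confidence in [0, 1], used as the weight.
  // leak_factor: 1.0 while the buffer is filling, < 1.0 once it is full.
  float Update(float level_dbfs, float speech_probability, float leak_factor) {
    numerator = numerator * leak_factor + level_dbfs * speech_probability;
    denominator = denominator * leak_factor + speech_probability;
    return numerator / denominator;
  }
};
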
 
diff --git a/modules/audio_processing/agc2/adaptive_mode_level_estimator.h b/modules/audio_processing/agc2/adaptive_mode_level_estimator.h
index f887268..63b9de2 100644
--- a/modules/audio_processing/agc2/adaptive_mode_level_estimator.h
+++ b/modules/audio_processing/agc2/adaptive_mode_level_estimator.h
@@ -16,6 +16,7 @@
 #include "modules/audio_processing/agc2/agc2_common.h"  // kFullBufferSizeMs...
 #include "modules/audio_processing/agc2/saturation_protector.h"
 #include "modules/audio_processing/agc2/vad_with_level.h"
+#include "modules/audio_processing/include/audio_processing.h"
 
 namespace webrtc {
 class ApmDataDumper;
@@ -23,8 +24,11 @@
 class AdaptiveModeLevelEstimator {
  public:
   explicit AdaptiveModeLevelEstimator(ApmDataDumper* apm_data_dumper);
-  AdaptiveModeLevelEstimator(ApmDataDumper* apm_data_dumper,
-                             float extra_saturation_margin_db);
+  AdaptiveModeLevelEstimator(
+      ApmDataDumper* apm_data_dumper,
+      AudioProcessing::Config::GainController2::LevelEstimator level_estimator,
+      bool use_saturation_protector,
+      float extra_saturation_margin_db);
   void UpdateEstimation(const VadWithLevel::LevelAndProbability& vad_data);
   float LatestLevelEstimate() const;
   void Reset();
@@ -35,6 +39,9 @@
  private:
   void DebugDumpEstimate();
 
+  const AudioProcessing::Config::GainController2::LevelEstimator
+      level_estimator_;
+  const bool use_saturation_protector_;
   size_t buffer_size_ms_ = 0;
   float last_estimate_with_offset_dbfs_ = kInitialSpeechLevelEstimateDbfs;
   float estimate_numerator_ = 0.f;
diff --git a/modules/audio_processing/agc2/vad_with_level_unittest.cc b/modules/audio_processing/agc2/vad_with_level_unittest.cc
new file mode 100644
index 0000000..f9aee62
--- /dev/null
+++ b/modules/audio_processing/agc2/vad_with_level_unittest.cc
@@ -0,0 +1,40 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_processing/agc2/vad_with_level.h"
+
+#include "rtc_base/gunit.h"
+
+namespace webrtc {
+namespace test {
+
+TEST(AutomaticGainController2VadWithLevelEstimator,
+     PeakLevelGreaterThanRmsLevel) {
+  constexpr size_t kSampleRateHz = 8000;
+
+  // 10 ms input frame, constant except for one peak value.
+  // Handcrafted so that the average is lower than the peak value.
+  std::array<float, kSampleRateHz / 100> frame;
+  frame.fill(1000.f);
+  frame[10] = 2000.f;
+  float* const channel0 = frame.data();
+  AudioFrameView<float> frame_view(&channel0, 1, frame.size());
+
+  // Compute audio frame levels (the VAD result is ignored).
+  VadWithLevel vad_with_level;
+  auto levels_and_vad_prob = vad_with_level.AnalyzeFrame(frame_view);
+
+  // Compare peak and RMS levels.
+  EXPECT_LT(levels_and_vad_prob.speech_rms_dbfs,
+            levels_and_vad_prob.speech_peak_dbfs);
+}
+
+}  // namespace test
+}  // namespace webrtc
diff --git a/modules/audio_processing/audio_processing_impl.cc b/modules/audio_processing/audio_processing_impl.cc
index 3764647..2937c06 100644
--- a/modules/audio_processing/audio_processing_impl.cc
+++ b/modules/audio_processing/audio_processing_impl.cc
@@ -259,6 +259,7 @@
   std::unique_ptr<CustomProcessing> render_pre_processor;
   std::unique_ptr<GainApplier> pre_amplifier;
   std::unique_ptr<CustomAudioAnalyzer> capture_analyzer;
+  std::unique_ptr<LevelEstimatorImpl> output_level_estimator;
 };
 
 AudioProcessingBuilder::AudioProcessingBuilder() = default;
@@ -673,6 +674,13 @@
                    << config_.gain_controller2.enabled;
   RTC_LOG(LS_INFO) << "Pre-amplifier activated: "
                    << config_.pre_amplifier.enabled;
+
+  if (config_.level_estimation.enabled &&
+      !private_submodules_->output_level_estimator) {
+    private_submodules_->output_level_estimator.reset(
+        new LevelEstimatorImpl(&crit_capture_));
+    private_submodules_->output_level_estimator->Enable(true);
+  }
 }
 
 void AudioProcessingImpl::SetExtraOptions(const webrtc::Config& config) {
@@ -1336,6 +1344,13 @@
 
   // The level estimator operates on the recombined data.
   public_submodules_->level_estimator->ProcessStream(capture_buffer);
+  if (config_.level_estimation.enabled) {
+    private_submodules_->output_level_estimator->ProcessStream(capture_buffer);
+    capture_.stats.output_rms_dbfs =
+        private_submodules_->output_level_estimator->RMS();
+  } else {
+    capture_.stats.output_rms_dbfs = absl::nullopt;
+  }
 
   capture_output_rms_.Analyze(rtc::ArrayView<const int16_t>(
       capture_buffer->channels_const()[0],
@@ -1587,49 +1602,50 @@
 
 AudioProcessingStats AudioProcessingImpl::GetStatistics(
     bool has_remote_tracks) const {
-  AudioProcessingStats stats;
-  if (has_remote_tracks) {
-    EchoCancellationImpl::Metrics metrics;
-    rtc::CritScope cs_capture(&crit_capture_);
-    if (private_submodules_->echo_controller) {
-      auto ec_metrics = private_submodules_->echo_controller->GetMetrics();
-      stats.echo_return_loss = ec_metrics.echo_return_loss;
+  rtc::CritScope cs_capture(&crit_capture_);
+  if (!has_remote_tracks) {
+    return capture_.stats;
+  }
+  AudioProcessingStats stats = capture_.stats;
+  EchoCancellationImpl::Metrics metrics;
+  if (private_submodules_->echo_controller) {
+    auto ec_metrics = private_submodules_->echo_controller->GetMetrics();
+    stats.echo_return_loss = ec_metrics.echo_return_loss;
+    stats.echo_return_loss_enhancement =
+        ec_metrics.echo_return_loss_enhancement;
+    stats.delay_ms = ec_metrics.delay_ms;
+  } else if (private_submodules_->echo_cancellation->GetMetrics(&metrics) ==
+             Error::kNoError) {
+    if (metrics.divergent_filter_fraction != -1.0f) {
+      stats.divergent_filter_fraction =
+          absl::optional<double>(metrics.divergent_filter_fraction);
+    }
+    if (metrics.echo_return_loss.instant != -100) {
+      stats.echo_return_loss =
+          absl::optional<double>(metrics.echo_return_loss.instant);
+    }
+    if (metrics.echo_return_loss_enhancement.instant != -100) {
       stats.echo_return_loss_enhancement =
-          ec_metrics.echo_return_loss_enhancement;
-      stats.delay_ms = ec_metrics.delay_ms;
-    } else if (private_submodules_->echo_cancellation->GetMetrics(&metrics) ==
-               Error::kNoError) {
-      if (metrics.divergent_filter_fraction != -1.0f) {
-        stats.divergent_filter_fraction =
-            absl::optional<double>(metrics.divergent_filter_fraction);
-      }
-      if (metrics.echo_return_loss.instant != -100) {
-        stats.echo_return_loss =
-            absl::optional<double>(metrics.echo_return_loss.instant);
-      }
-      if (metrics.echo_return_loss_enhancement.instant != -100) {
-        stats.echo_return_loss_enhancement = absl::optional<double>(
-            metrics.echo_return_loss_enhancement.instant);
-      }
+          absl::optional<double>(metrics.echo_return_loss_enhancement.instant);
     }
-    if (config_.residual_echo_detector.enabled) {
-      RTC_DCHECK(private_submodules_->echo_detector);
-      auto ed_metrics = private_submodules_->echo_detector->GetMetrics();
-      stats.residual_echo_likelihood = ed_metrics.echo_likelihood;
-      stats.residual_echo_likelihood_recent_max =
-          ed_metrics.echo_likelihood_recent_max;
+  }
+  if (config_.residual_echo_detector.enabled) {
+    RTC_DCHECK(private_submodules_->echo_detector);
+    auto ed_metrics = private_submodules_->echo_detector->GetMetrics();
+    stats.residual_echo_likelihood = ed_metrics.echo_likelihood;
+    stats.residual_echo_likelihood_recent_max =
+        ed_metrics.echo_likelihood_recent_max;
+  }
+  int delay_median, delay_std;
+  float fraction_poor_delays;
+  if (private_submodules_->echo_cancellation->GetDelayMetrics(
+          &delay_median, &delay_std, &fraction_poor_delays) ==
+      Error::kNoError) {
+    if (delay_median >= 0) {
+      stats.delay_median_ms = absl::optional<int32_t>(delay_median);
     }
-    int delay_median, delay_std;
-    float fraction_poor_delays;
-    if (private_submodules_->echo_cancellation->GetDelayMetrics(
-            &delay_median, &delay_std, &fraction_poor_delays) ==
-        Error::kNoError) {
-      if (delay_median >= 0) {
-        stats.delay_median_ms = absl::optional<int32_t>(delay_median);
-      }
-      if (delay_std >= 0) {
-        stats.delay_standard_deviation_ms = absl::optional<int32_t>(delay_std);
-      }
+    if (delay_std >= 0) {
+      stats.delay_standard_deviation_ms = absl::optional<int32_t>(delay_std);
     }
   }
   return stats;
diff --git a/modules/audio_processing/audio_processing_impl.h b/modules/audio_processing/audio_processing_impl.h
index e376a74..2f946c5 100644
--- a/modules/audio_processing/audio_processing_impl.h
+++ b/modules/audio_processing/audio_processing_impl.h
@@ -18,6 +18,7 @@
 #include "modules/audio_processing/audio_buffer.h"
 #include "modules/audio_processing/include/aec_dump.h"
 #include "modules/audio_processing/include/audio_processing.h"
+#include "modules/audio_processing/include/audio_processing_statistics.h"
 #include "modules/audio_processing/render_queue_item_verifier.h"
 #include "modules/audio_processing/rms_level.h"
 #include "rtc_base/criticalsection.h"
@@ -390,6 +391,7 @@
     bool echo_path_gain_change;
     int prev_analog_mic_level;
     float prev_pre_amp_gain;
+    AudioProcessingStats stats;
   } capture_ RTC_GUARDED_BY(crit_capture_);
 
   struct ApmCaptureNonLockedState {
diff --git a/modules/audio_processing/audio_processing_unittest.cc b/modules/audio_processing/audio_processing_unittest.cc
index 18e669f..6809ab9 100644
--- a/modules/audio_processing/audio_processing_unittest.cc
+++ b/modules/audio_processing/audio_processing_unittest.cc
@@ -2801,4 +2801,42 @@
   EXPECT_FALSE(stats.delay_median_ms);
   EXPECT_FALSE(stats.delay_standard_deviation_ms);
 }
+
+TEST(ApmStatistics, ReportOutputRmsDbfs) {
+  ProcessingConfig processing_config = {
+      {{32000, 1}, {32000, 1}, {32000, 1}, {32000, 1}}};
+  AudioProcessing::Config config;
+
+  // Set up an audioframe.
+  AudioFrame frame;
+  frame.num_channels_ = 1;
+  SetFrameSampleRate(&frame, AudioProcessing::NativeRate::kSampleRate48kHz);
+
+  // Fill the audio frame with a sawtooth pattern.
+  int16_t* ptr = frame.mutable_data();
+  for (size_t i = 0; i < frame.kMaxDataSizeSamples; i++) {
+    ptr[i] = 10000 * ((i % 3) - 1);
+  }
+
+  std::unique_ptr<AudioProcessing> apm(AudioProcessingBuilder().Create());
+  apm->Initialize(processing_config);
+
+  // If not enabled, no metric should be reported.
+  EXPECT_EQ(apm->ProcessStream(&frame), 0);
+  EXPECT_FALSE(apm->GetStatistics(false).output_rms_dbfs);
+
+  // If enabled, metrics should be reported.
+  config.level_estimation.enabled = true;
+  apm->ApplyConfig(config);
+  EXPECT_EQ(apm->ProcessStream(&frame), 0);
+  auto stats = apm->GetStatistics(false);
+  EXPECT_TRUE(stats.output_rms_dbfs);
+  EXPECT_GE(*stats.output_rms_dbfs, 0);
+
+  // If re-disabled, the value is again not reported.
+  config.level_estimation.enabled = false;
+  apm->ApplyConfig(config);
+  EXPECT_EQ(apm->ProcessStream(&frame), 0);
+  EXPECT_FALSE(apm->GetStatistics(false).output_rms_dbfs);
+}
 }  // namespace webrtc
diff --git a/modules/audio_processing/gain_controller2.cc b/modules/audio_processing/gain_controller2.cc
index f256be0..2a32744 100644
--- a/modules/audio_processing/gain_controller2.cc
+++ b/modules/audio_processing/gain_controller2.cc
@@ -47,14 +47,14 @@
                                     audio->num_frames());
   // Apply fixed gain first, then the adaptive one.
   gain_applier_.ApplyGain(float_frame);
-  if (adaptive_digital_mode_) {
+  if (config_.adaptive_digital.enabled) {
     adaptive_agc_->Process(float_frame, limiter_.LastAudioLevel());
   }
   limiter_.Process(float_frame);
 }
 
 void GainController2::NotifyAnalogLevel(int level) {
-  if (analog_level_ != level && adaptive_digital_mode_) {
+  if (analog_level_ != level && config_.adaptive_digital.enabled) {
     adaptive_agc_->Reset();
   }
   analog_level_ = level;
@@ -62,31 +62,54 @@
 
 void GainController2::ApplyConfig(
     const AudioProcessing::Config::GainController2& config) {
-  RTC_DCHECK(Validate(config));
+  RTC_DCHECK(Validate(config))
+      << " the invalid config was " << ToString(config);
+
   config_ = config;
-  if (gain_applier_.GetGainFactor() != config_.fixed_gain_db) {
+  if (config.fixed_digital.gain_db != config_.fixed_digital.gain_db) {
     // Reset the limiter to quickly react on abrupt level changes caused by
     // large changes of the fixed gain.
     limiter_.Reset();
   }
-  gain_applier_.SetGainFactor(DbToRatio(config_.fixed_gain_db));
-  adaptive_digital_mode_ = config_.adaptive_digital_mode;
-  adaptive_agc_.reset(
-      new AdaptiveAgc(data_dumper_.get(), config_.extra_saturation_margin_db));
+  gain_applier_.SetGainFactor(DbToRatio(config_.fixed_digital.gain_db));
+  adaptive_agc_.reset(new AdaptiveAgc(data_dumper_.get(), config_));
 }
 
 bool GainController2::Validate(
     const AudioProcessing::Config::GainController2& config) {
-  return config.fixed_gain_db >= 0.f &&
-         config.extra_saturation_margin_db >= 0.f &&
-         config.extra_saturation_margin_db <= 100.f;
+  return config.fixed_digital.gain_db >= 0.f &&
+         config.fixed_digital.gain_db < 50.f &&
+         config.adaptive_digital.extra_saturation_margin_db >= 0.f &&
+         config.adaptive_digital.extra_saturation_margin_db <= 100.f;
 }
 
 std::string GainController2::ToString(
     const AudioProcessing::Config::GainController2& config) {
   rtc::StringBuilder ss;
-  ss << "{enabled: " << (config.enabled ? "true" : "false") << ", "
-     << "fixed_gain_dB: " << config.fixed_gain_db << "}";
+  std::string adaptive_digital_level_estimator;
+  using LevelEstimatorType =
+      AudioProcessing::Config::GainController2::LevelEstimator;
+  switch (config.adaptive_digital.level_estimator) {
+    case LevelEstimatorType::kRms:
+      adaptive_digital_level_estimator = "RMS";
+      break;
+    case LevelEstimatorType::kPeak:
+      adaptive_digital_level_estimator = "peak";
+      break;
+  }
+  // clang-format off
+  // clang formatting doesn't respect custom nested style.
+  ss << "{"
+     << "enabled: " << (config.enabled ? "true" : "false") << ", "
+     << "fixed_digital: {gain_db: " << config.fixed_digital.gain_db << "}, "
+     << "adaptive_digital: {"
+      << "enabled: "
+        << (config.adaptive_digital.enabled ? "true" : "false") << ", "
+      << "level_estimator: " << adaptive_digital_level_estimator << ", "
+      << "extra_saturation_margin_db:"
+        << config.adaptive_digital.extra_saturation_margin_db << "}"
+      << "}";
+  // clang-format on
   return ss.Release();
 }
 
diff --git a/modules/audio_processing/gain_controller2.h b/modules/audio_processing/gain_controller2.h
index 013385d..3a11810 100644
--- a/modules/audio_processing/gain_controller2.h
+++ b/modules/audio_processing/gain_controller2.h
@@ -49,7 +49,6 @@
   std::unique_ptr<AdaptiveAgc> adaptive_agc_;
   Limiter limiter_;
   int analog_level_ = -1;
-  bool adaptive_digital_mode_ = true;
 
   RTC_DISALLOW_COPY_AND_ASSIGN(GainController2);
 };
diff --git a/modules/audio_processing/gain_controller2_unittest.cc b/modules/audio_processing/gain_controller2_unittest.cc
index caaefdd..27d540a 100644
--- a/modules/audio_processing/gain_controller2_unittest.cc
+++ b/modules/audio_processing/gain_controller2_unittest.cc
@@ -52,8 +52,8 @@
 AudioProcessing::Config::GainController2 CreateAgc2FixedDigitalModeConfig(
     float fixed_gain_db) {
   AudioProcessing::Config::GainController2 config;
-  config.adaptive_digital_mode = false;
-  config.fixed_gain_db = fixed_gain_db;
+  config.adaptive_digital.enabled = false;
+  config.fixed_digital.gain_db = fixed_gain_db;
   // TODO(alessiob): Check why ASSERT_TRUE() below does not compile.
   EXPECT_TRUE(GainController2::Validate(config));
   return config;
@@ -113,29 +113,26 @@
   gain_controller2->ApplyConfig(config);
 
   // Check that attenuation is not allowed.
-  config.fixed_gain_db = -5.f;
+  config.fixed_digital.gain_db = -5.f;
   EXPECT_FALSE(GainController2::Validate(config));
 
   // Check that valid configurations are applied.
-  for (const float& fixed_gain_db : {0.f, 5.f, 10.f, 50.f}) {
-    config.fixed_gain_db = fixed_gain_db;
+  for (const float& fixed_gain_db : {0.f, 5.f, 10.f, 40.f}) {
+    config.fixed_digital.gain_db = fixed_gain_db;
     EXPECT_TRUE(GainController2::Validate(config));
     gain_controller2->ApplyConfig(config);
   }
 }
 
 TEST(GainController2, ToString) {
-  // Tests GainController2::ToString().
+  // Tests GainController2::ToString(). Only test the enabled property.
   AudioProcessing::Config::GainController2 config;
-  config.fixed_gain_db = 5.f;
 
   config.enabled = false;
-  EXPECT_EQ("{enabled: false, fixed_gain_dB: 5}",
-            GainController2::ToString(config));
+  EXPECT_EQ("{enabled: false", GainController2::ToString(config).substr(0, 15));
 
   config.enabled = true;
-  EXPECT_EQ("{enabled: true, fixed_gain_dB: 5}",
-            GainController2::ToString(config));
+  EXPECT_EQ("{enabled: true", GainController2::ToString(config).substr(0, 14));
 }
 
 TEST(GainController2FixedDigital, GainShouldChangeOnSetGain) {
@@ -263,8 +260,9 @@
   // Check that samples are not amplified as much when extra margin is
   // high. They should not be amplified at all, but only after convergence. GC2
   // starts with a gain, and it takes time until it's down to 0 dB.
-  config.extra_saturation_margin_db = 50.f;
-  config.fixed_gain_db = 0.f;
+  config.fixed_digital.gain_db = 0.f;
+  config.adaptive_digital.enabled = true;
+  config.adaptive_digital.extra_saturation_margin_db = 50.f;
   gain_controller2.ApplyConfig(config);
 
   EXPECT_LT(GainAfterProcessingFile(&gain_controller2), 2.f);
@@ -276,8 +274,9 @@
 
   AudioProcessing::Config::GainController2 config;
   // Check that some gain is applied if there is no margin.
-  config.extra_saturation_margin_db = 0.f;
-  config.fixed_gain_db = 0.f;
+  config.fixed_digital.gain_db = 0.f;
+  config.adaptive_digital.enabled = true;
+  config.adaptive_digital.extra_saturation_margin_db = 0.f;
   gain_controller2.ApplyConfig(config);
 
   EXPECT_GT(GainAfterProcessingFile(&gain_controller2), 2.f);
diff --git a/modules/audio_processing/include/audio_processing.h b/modules/audio_processing/include/audio_processing.h
index b105ef1..df51313 100644
--- a/modules/audio_processing/include/audio_processing.h
+++ b/modules/audio_processing/include/audio_processing.h
@@ -270,12 +270,24 @@
     // first applies a fixed gain. The adaptive digital AGC can be turned off by
     // setting |adaptive_digital_mode=false|.
     struct GainController2 {
+      enum LevelEstimator { kRms, kPeak };
       bool enabled = false;
-      bool adaptive_digital_mode = true;
-      float extra_saturation_margin_db = 2.f;
-      float fixed_gain_db = 0.f;
+      struct {
+        float gain_db = 0.f;
+      } fixed_digital;
+      struct {
+        bool enabled = false;
+        LevelEstimator level_estimator = kRms;
+        bool use_saturation_protector = true;
+        float extra_saturation_margin_db = 2.f;
+      } adaptive_digital;
     } gain_controller2;
 
+    // Enables reporting of |output_rms_dbfs| in webrtc::AudioProcessingStats.
+    struct LevelEstimation {
+      bool enabled = false;
+    } level_estimation;
+
     // Explicit copy assignment implementation to avoid issues with memory
     // sanitizer complaints in case of self-assignment.
     // TODO(peah): Add buildflag to ensure that this is only included for memory
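
A hedged usage sketch of the reshaped config, using only the field names introduced above; the AudioProcessing instance is assumed to be created elsewhere in the application:

#include "modules/audio_processing/include/audio_processing.h"

void ConfigureApmSketch(webrtc::AudioProcessing* apm) {
  webrtc::AudioProcessing::Config config;
  config.gain_controller2.enabled = true;
  config.gain_controller2.fixed_digital.gain_db = 6.f;
  config.gain_controller2.adaptive_digital.enabled = true;
  config.gain_controller2.adaptive_digital.level_estimator =
      webrtc::AudioProcessing::Config::GainController2::kRms;
  config.level_estimation.enabled = true;  // exposes output_rms_dbfs in stats
  apm->ApplyConfig(config);
}
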
diff --git a/modules/audio_processing/include/audio_processing_statistics.h b/modules/audio_processing/include/audio_processing_statistics.h
index 2ff2009..683db05 100644
--- a/modules/audio_processing/include/audio_processing_statistics.h
+++ b/modules/audio_processing/include/audio_processing_statistics.h
@@ -24,6 +24,14 @@
   AudioProcessingStats(const AudioProcessingStats& other);
   ~AudioProcessingStats();
 
+  // The root mean square (RMS) level in dBFS (decibels from digital
+  // full-scale) of the last capture frame, after processing. It is
+  // constrained to [-127, 0].
+  // The computation follows: https://tools.ietf.org/html/rfc6465
+  // with the intent that it can provide the RTP audio level indication.
+  // Only reported if level estimation is enabled in AudioProcessing::Config.
+  absl::optional<int> output_rms_dbfs;
+
   // AEC Statistics.
   // ERL = 10log_10(P_far / P_echo)
   absl::optional<double> echo_return_loss;
diff --git a/modules/audio_processing/logging/apm_data_dumper.cc b/modules/audio_processing/logging/apm_data_dumper.cc
index 45d8088..cc879c8 100644
--- a/modules/audio_processing/logging/apm_data_dumper.cc
+++ b/modules/audio_processing/logging/apm_data_dumper.cc
@@ -19,16 +19,30 @@
 #endif
 
 namespace webrtc {
-
 namespace {
 
 #if WEBRTC_APM_DEBUG_DUMP == 1
-std::string FormFileName(const char* name,
+
+#if defined(WEBRTC_WIN)
+constexpr char kPathDelimiter = '\\';
+#else
+constexpr char kPathDelimiter = '/';
+#endif
+
+std::string FormFileName(const char* output_dir,
+                         const char* name,
                          int instance_index,
                          int reinit_index,
                          const std::string& suffix) {
   char buf[1024];
   rtc::SimpleStringBuilder ss(buf);
+  const size_t output_dir_size = strlen(output_dir);
+  if (output_dir_size > 0) {
+    ss << output_dir;
+    if (output_dir[output_dir_size - 1] != kPathDelimiter) {
+      ss << kPathDelimiter;
+    }
+  }
   ss << name << "_" << instance_index << "-" << reinit_index << suffix;
   return ss.str();
 }
@@ -40,20 +54,22 @@
 ApmDataDumper::ApmDataDumper(int instance_index)
     : instance_index_(instance_index) {}
 #else
-ApmDataDumper::ApmDataDumper(int instance_index) {}
+ApmDataDumper::ApmDataDumper(int instance_index){};
 #endif
 
-ApmDataDumper::~ApmDataDumper() {}
+ApmDataDumper::~ApmDataDumper() = default;
 
 #if WEBRTC_APM_DEBUG_DUMP == 1
 bool ApmDataDumper::recording_activated_ = false;
-;
+char ApmDataDumper::output_dir_[] = "";
+
 FILE* ApmDataDumper::GetRawFile(const char* name) {
-  std::string filename =
-      FormFileName(name, instance_index_, recording_set_index_, ".dat");
+  std::string filename = FormFileName(output_dir_, name, instance_index_,
+                                      recording_set_index_, ".dat");
   auto& f = raw_files_[filename];
   if (!f) {
     f.reset(fopen(filename.c_str(), "wb"));
+    RTC_CHECK(f.get()) << "Cannot write to " << filename << ".";
   }
   return f.get();
 }
@@ -61,15 +77,14 @@
 WavWriter* ApmDataDumper::GetWavFile(const char* name,
                                      int sample_rate_hz,
                                      int num_channels) {
-  std::string filename =
-      FormFileName(name, instance_index_, recording_set_index_, ".wav");
+  std::string filename = FormFileName(output_dir_, name, instance_index_,
+                                      recording_set_index_, ".wav");
   auto& f = wav_files_[filename];
   if (!f) {
     f.reset(new WavWriter(filename.c_str(), sample_rate_hz, num_channels));
   }
   return f.get();
 }
-
 #endif
 
 }  // namespace webrtc
diff --git a/modules/audio_processing/logging/apm_data_dumper.h b/modules/audio_processing/logging/apm_data_dumper.h
index f0c5978..b541ae8 100644
--- a/modules/audio_processing/logging/apm_data_dumper.h
+++ b/modules/audio_processing/logging/apm_data_dumper.h
@@ -13,7 +13,9 @@
 
 #include <stdint.h>
 #include <stdio.h>
+#include <string.h>
 
+#include <string>
 #if WEBRTC_APM_DEBUG_DUMP == 1
 #include <unordered_map>
 #endif
@@ -21,6 +23,7 @@
 #include "api/array_view.h"
 #if WEBRTC_APM_DEBUG_DUMP == 1
 #include "common_audio/wav_file.h"
+#include "rtc_base/checks.h"
 #endif
 #include "rtc_base/constructormagic.h"
 
@@ -57,6 +60,14 @@
 #endif
   }
 
+  // Set an optional output directory.
+  static void SetOutputDirectory(const std::string& output_dir) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    RTC_CHECK_LT(output_dir.size(), kOutputDirMaxLength);
+    strncpy(output_dir_, output_dir.c_str(), output_dir.size());
+#endif
+  }
+
   // Reinitializes the data dumping such that new versions
   // of all files being dumped to are created.
   void InitiateNewSetOfRecordings() {
@@ -217,6 +228,12 @@
 #endif
   }
 
+  void DumpRaw(const char* name, rtc::ArrayView<const size_t> v) {
+#if WEBRTC_APM_DEBUG_DUMP == 1
+    DumpRaw(name, v.size(), v.data());
+#endif
+  }
+
   void DumpWav(const char* name,
                size_t v_length,
                const float* v,
@@ -244,6 +261,8 @@
  private:
 #if WEBRTC_APM_DEBUG_DUMP == 1
   static bool recording_activated_;
+  static constexpr size_t kOutputDirMaxLength = 1024;
+  static char output_dir_[kOutputDirMaxLength];
   const int instance_index_;
   int recording_set_index_ = 0;
   std::unordered_map<std::string, std::unique_ptr<FILE, RawFileCloseFunctor>>
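
A short usage sketch of the new static setter; it only takes effect in builds with WEBRTC_APM_DEBUG_DUMP == 1, the directory string is just an example, and a trailing path delimiter is appended automatically when missing:

#include "modules/audio_processing/logging/apm_data_dumper.h"

void RouteApmDumpsToDirectory() {
  // All subsequently opened .dat/.wav dump files are created under this path.
  webrtc::ApmDataDumper::SetOutputDirectory("/tmp/apm_dumps");
}
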
diff --git a/modules/audio_processing/module.mk b/modules/audio_processing/module.mk
index 1b1754f..d03942e 100644
--- a/modules/audio_processing/module.mk
+++ b/modules/audio_processing/module.mk
@@ -90,12 +90,14 @@
 	modules/audio_processing/aec3/aec3_common.o \
 	modules/audio_processing/aec3/aec3_fft.o \
 	modules/audio_processing/aec3/aec_state.o \
+	modules/audio_processing/aec3/api_call_jitter_metrics.o\
 	modules/audio_processing/aec3/block_delay_buffer.o \
 	modules/audio_processing/aec3/block_framer.o \
 	modules/audio_processing/aec3/block_processor.o \
 	modules/audio_processing/aec3/block_processor2.o \
 	modules/audio_processing/aec3/block_processor_metrics.o \
 	modules/audio_processing/aec3/cascaded_biquad_filter.o \
+	modules/audio_processing/aec3/clockdrift_detector.o \
 	modules/audio_processing/aec3/comfort_noise_generator.o \
 	modules/audio_processing/aec3/decimator.o \
 	modules/audio_processing/aec3/downsampled_render_buffer.o \
@@ -131,6 +133,7 @@
 	modules/audio_processing/aec3/reverb_model_estimator.o \
 	modules/audio_processing/aec3/reverb_model_fallback.o \
 	modules/audio_processing/aec3/shadow_filter_update_gain.o \
+	modules/audio_processing/aec3/signal_dependent_erle_estimator.o \
 	modules/audio_processing/aec3/skew_estimator.o \
 	modules/audio_processing/aec3/stationarity_estimator.o \
 	modules/audio_processing/aec3/subband_erle_estimator.o \
diff --git a/modules/rtp_rtcp/BUILD.gn b/modules/rtp_rtcp/BUILD.gn
index 4f62184..edb981b 100644
--- a/modules/rtp_rtcp/BUILD.gn
+++ b/modules/rtp_rtcp/BUILD.gn
@@ -11,6 +11,7 @@
 rtc_source_set("rtp_rtcp_format") {
   visibility = [ "*" ]
   public = [
+    "include/rtcp_statistics.h",
     "include/rtp_cvo.h",
     "include/rtp_header_extension_map.h",
     "include/rtp_rtcp_defines.h",
diff --git a/modules/rtp_rtcp/include/receive_statistics.h b/modules/rtp_rtcp/include/receive_statistics.h
index 24d6f81..c299ea6 100644
--- a/modules/rtp_rtcp/include/receive_statistics.h
+++ b/modules/rtp_rtcp/include/receive_statistics.h
@@ -12,11 +12,13 @@
 #define MODULES_RTP_RTCP_INCLUDE_RECEIVE_STATISTICS_H_
 
 #include <map>
+#include <memory>
 #include <vector>
 
 #include "call/rtp_packet_sink_interface.h"
 #include "modules/include/module.h"
 #include "modules/include/module_common_types.h"
+#include "modules/rtp_rtcp/include/rtcp_statistics.h"
 #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
 #include "modules/rtp_rtcp/source/rtcp_packet/report_block.h"
 #include "rtc_base/deprecation.h"
@@ -54,14 +56,14 @@
  public:
   ~ReceiveStatistics() override = default;
 
-  static ReceiveStatistics* Create(Clock* clock);
+  static ReceiveStatistics* Create(Clock* clock) {
+    return Create(clock, nullptr, nullptr).release();
+  }
 
-  // Updates the receive statistics with this packet.
-  // TODO(bugs.webrtc.org/8016): Deprecated. Delete as soon as
-  // downstream code is updated to use OnRtpPacket.
-  RTC_DEPRECATED
-  virtual void IncomingPacket(const RTPHeader& rtp_header,
-                              size_t packet_length) = 0;
+  static std::unique_ptr<ReceiveStatistics> Create(
+      Clock* clock,
+      RtcpStatisticsCallback* rtcp_callback,
+      StreamDataCountersCallback* rtp_callback);
 
   // Increment counter for number of FEC packets received.
   virtual void FecPacketReceived(const RtpPacketReceived& packet) = 0;
@@ -74,14 +76,6 @@
   // Detect retransmissions, enabling updates of the retransmitted counters. The
   // default is false.
   virtual void EnableRetransmitDetection(uint32_t ssrc, bool enable) = 0;
-
-  // Called on new RTCP stats creation.
-  virtual void RegisterRtcpStatisticsCallback(
-      RtcpStatisticsCallback* callback) = 0;
-
-  // Called on new RTP stats creation.
-  virtual void RegisterRtpStatisticsCallback(
-      StreamDataCountersCallback* callback) = 0;
 };
 
 }  // namespace webrtc
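
A hedged usage sketch of the new factory: the callbacks may be null, and ownership is now returned as a std::unique_ptr instead of a raw pointer. The Clock pointer is assumed to come from the surrounding code.

#include <memory>

#include "modules/rtp_rtcp/include/receive_statistics.h"

std::unique_ptr<webrtc::ReceiveStatistics> MakeReceiveStatistics(
    webrtc::Clock* clock) {
  // No RTCP/RTP counter callbacks are registered here; they are now passed at
  // construction time instead of via the removed Register*Callback methods.
  return webrtc::ReceiveStatistics::Create(clock,
                                           /*rtcp_callback=*/nullptr,
                                           /*rtp_callback=*/nullptr);
}
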
diff --git a/modules/rtp_rtcp/include/rtcp_statistics.h b/modules/rtp_rtcp/include/rtcp_statistics.h
new file mode 100644
index 0000000..e1d576d
--- /dev/null
+++ b/modules/rtp_rtcp/include/rtcp_statistics.h
@@ -0,0 +1,36 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef MODULES_RTP_RTCP_INCLUDE_RTCP_STATISTICS_H_
+#define MODULES_RTP_RTCP_INCLUDE_RTCP_STATISTICS_H_
+
+#include <stdint.h>
+
+namespace webrtc {
+
+// Statistics for an RTCP channel
+struct RtcpStatistics {
+  uint8_t fraction_lost = 0;
+  int32_t packets_lost = 0;  // Defined as a 24 bit signed integer in RTCP
+  uint32_t extended_highest_sequence_number = 0;
+  uint32_t jitter = 0;
+};
+
+class RtcpStatisticsCallback {
+ public:
+  virtual ~RtcpStatisticsCallback() {}
+
+  virtual void StatisticsUpdated(const RtcpStatistics& statistics,
+                                 uint32_t ssrc) = 0;
+  virtual void CNameChanged(const char* cname, uint32_t ssrc) = 0;
+};
+
+}  // namespace webrtc
+#endif  // MODULES_RTP_RTCP_INCLUDE_RTCP_STATISTICS_H_
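
An illustrative implementation of the callback interface declared above; the printing sink is hypothetical and only shows which fields are delivered per SSRC.

#include <cstdint>
#include <cstdio>

#include "modules/rtp_rtcp/include/rtcp_statistics.h"

class PrintingRtcpObserver : public webrtc::RtcpStatisticsCallback {
 public:
  void StatisticsUpdated(const webrtc::RtcpStatistics& statistics,
                         uint32_t ssrc) override {
    std::printf("ssrc=%u fraction_lost=%u jitter=%u\n",
                static_cast<unsigned>(ssrc),
                static_cast<unsigned>(statistics.fraction_lost),
                static_cast<unsigned>(statistics.jitter));
  }
  void CNameChanged(const char* cname, uint32_t ssrc) override {
    std::printf("ssrc=%u cname=%s\n", static_cast<unsigned>(ssrc), cname);
  }
};
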
diff --git a/modules/rtp_rtcp/include/rtp_header_parser.h b/modules/rtp_rtcp/include/rtp_header_parser.h
index 2d84fc1..85eab90 100644
--- a/modules/rtp_rtcp/include/rtp_header_parser.h
+++ b/modules/rtp_rtcp/include/rtp_header_parser.h
@@ -10,6 +10,7 @@
 #ifndef MODULES_RTP_RTCP_INCLUDE_RTP_HEADER_PARSER_H_
 #define MODULES_RTP_RTCP_INCLUDE_RTP_HEADER_PARSER_H_
 
+#include "api/rtpparameters.h"
 #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
 
 namespace webrtc {
@@ -36,8 +37,14 @@
   virtual bool RegisterRtpHeaderExtension(RTPExtensionType type,
                                           uint8_t id) = 0;
 
+  // Registers an RTP header extension.
+  virtual bool RegisterRtpHeaderExtension(RtpExtension extension) = 0;
+
   // De-registers an RTP header extension.
   virtual bool DeregisterRtpHeaderExtension(RTPExtensionType type) = 0;
+
+  // De-registers an RTP header extension.
+  virtual bool DeregisterRtpHeaderExtension(RtpExtension extension) = 0;
 };
 }  // namespace webrtc
 #endif  // MODULES_RTP_RTCP_INCLUDE_RTP_HEADER_PARSER_H_
diff --git a/modules/rtp_rtcp/include/rtp_rtcp.h b/modules/rtp_rtcp/include/rtp_rtcp.h
index 265046c..d136a5e 100644
--- a/modules/rtp_rtcp/include/rtp_rtcp.h
+++ b/modules/rtp_rtcp/include/rtp_rtcp.h
@@ -18,9 +18,9 @@
 
 #include "absl/types/optional.h"
 #include "api/video/video_bitrate_allocation.h"
-#include "common_types.h"  // NOLINT(build/include)
 #include "modules/include/module.h"
 #include "modules/rtp_rtcp/include/flexfec_sender.h"
+#include "modules/rtp_rtcp/include/receive_statistics.h"
 #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
 #include "rtc_base/constructormagic.h"
 #include "rtc_base/deprecation.h"
@@ -93,7 +93,8 @@
     RateLimiter* retransmission_rate_limiter = nullptr;
     OverheadObserver* overhead_observer = nullptr;
     RtpKeepAliveConfig keepalive_config;
-    RtcpIntervalConfig rtcp_interval_config;
+
+    int rtcp_report_interval_ms = 0;
 
     // Update network2 instead of pacer_exit field of video timing extension.
     bool populate_network2_timestamp = false;
diff --git a/modules/rtp_rtcp/include/rtp_rtcp_defines.cc b/modules/rtp_rtcp/include/rtp_rtcp_defines.cc
index 1da8ade..d743f52 100644
--- a/modules/rtp_rtcp/include/rtp_rtcp_defines.cc
+++ b/modules/rtp_rtcp/include/rtp_rtcp_defines.cc
@@ -9,6 +9,7 @@
  */
 
 #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
+#include "modules/rtp_rtcp/source/rtp_packet.h"
 
 #include <ctype.h>
 #include <string.h>
@@ -130,4 +131,11 @@
          payload_size == rhs.payload_size && pacing_info == rhs.pacing_info;
 }
 
+void RtpPacketCounter::AddPacket(const RtpPacket& packet) {
+  ++packets;
+  header_bytes += packet.headers_size();
+  padding_bytes += packet.padding_size();
+  payload_bytes += packet.payload_size();
+}
+
 }  // namespace webrtc
diff --git a/modules/rtp_rtcp/include/rtp_rtcp_defines.h b/modules/rtp_rtcp/include/rtp_rtcp_defines.h
index 5b9a051..ab4fcae 100644
--- a/modules/rtp_rtcp/include/rtp_rtcp_defines.h
+++ b/modules/rtp_rtcp/include/rtp_rtcp_defines.h
@@ -27,6 +27,7 @@
 #define IP_PACKET_SIZE 1500  // we assume ethernet
 
 namespace webrtc {
+class RtpPacket;
 namespace rtcp {
 class TransportFeedback;
 }
@@ -40,15 +41,6 @@
 // Minimum RTP header size in bytes.
 const uint8_t kRtpHeaderSize = 12;
 
-struct RtcpIntervalConfig final {
-  RtcpIntervalConfig() = default;
-  RtcpIntervalConfig(int64_t video_interval_ms, int64_t audio_interval_ms)
-      : video_interval_ms(video_interval_ms),
-        audio_interval_ms(audio_interval_ms) {}
-  int64_t video_interval_ms = 1000;
-  int64_t audio_interval_ms = 5000;
-};
-
 struct AudioPayload {
   SdpAudioFormat format;
   uint32_t rate;
@@ -112,6 +104,7 @@
   kRtpExtensionRepairedRtpStreamId,
   kRtpExtensionMid,
   kRtpExtensionGenericFrameDescriptor,
+  kRtpExtensionColorSpace,
   kRtpExtensionNumberOfExtensions  // Must be the last entity in the enum.
 };
 
@@ -222,15 +215,6 @@
   bool media_has_been_sent;
 };
 
-class RtpData {
- public:
-  virtual ~RtpData() {}
-
-  virtual int32_t OnReceivedPayloadData(const uint8_t* payload_data,
-                                        size_t payload_size,
-                                        const WebRtcRTPHeader* rtp_header) = 0;
-};
-
 // Callback interface for packets recovered by FlexFEC or ULPFEC. In
 // the FlexFEC case, the implementation should be able to demultiplex
 // the recovered RTP packets based on SSRC.
@@ -457,13 +441,8 @@
     packets -= other.packets;
   }
 
-  void AddPacket(size_t packet_length, const RTPHeader& header) {
-    ++packets;
-    header_bytes += header.headerLength;
-    padding_bytes += header.paddingLength;
-    payload_bytes +=
-        packet_length - (header.headerLength + header.paddingLength);
-  }
+  // Not inlined, since use of RtpPacket would result in circular includes.
+  void AddPacket(const RtpPacket& packet);
 
   size_t TotalBytes() const {
     return header_bytes + payload_bytes + padding_bytes;
diff --git a/modules/rtp_rtcp/mocks/mock_rtp_rtcp.cc b/modules/rtp_rtcp/mocks/mock_rtp_rtcp.cc
index d24c1b0..061f827 100644
--- a/modules/rtp_rtcp/mocks/mock_rtp_rtcp.cc
+++ b/modules/rtp_rtcp/mocks/mock_rtp_rtcp.cc
@@ -12,9 +12,6 @@
 
 namespace webrtc {
 
-MockRtpData::MockRtpData() = default;
-MockRtpData::~MockRtpData() = default;
-
 MockRtpRtcp::MockRtpRtcp() = default;
 MockRtpRtcp::~MockRtpRtcp() = default;
 
diff --git a/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h b/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
index 9f00654..3b9b943 100644
--- a/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
+++ b/modules/rtp_rtcp/mocks/mock_rtp_rtcp.h
@@ -27,17 +27,6 @@
 
 namespace webrtc {
 
-class MockRtpData : public RtpData {
- public:
-  MockRtpData();
-  ~MockRtpData();
-
-  MOCK_METHOD3(OnReceivedPayloadData,
-               int32_t(const uint8_t* payload_data,
-                       size_t payload_size,
-                       const WebRtcRTPHeader* rtp_header));
-};
-
 class MockRtpRtcp : public RtpRtcp {
  public:
   MockRtpRtcp();
diff --git a/modules/rtp_rtcp/source/receive_statistics_impl.cc b/modules/rtp_rtcp/source/receive_statistics_impl.cc
index abe028d..bc742d1 100644
--- a/modules/rtp_rtcp/source/receive_statistics_impl.cc
+++ b/modules/rtp_rtcp/source/receive_statistics_impl.cc
@@ -11,10 +11,11 @@
 #include "modules/rtp_rtcp/source/receive_statistics_impl.h"
 
 #include <math.h>
-
 #include <cstdlib>
+#include <memory>
 #include <vector>
 
+#include "absl/memory/memory.h"
 #include "modules/remote_bitrate_estimator/test/bwe_test_logging.h"
 #include "modules/rtp_rtcp/source/rtp_packet_received.h"
 #include "modules/rtp_rtcp/source/rtp_rtcp_config.h"
@@ -33,13 +34,14 @@
     uint32_t ssrc,
     Clock* clock,
     bool enable_retransmit_detection,
+    int max_reordering_threshold,
     RtcpStatisticsCallback* rtcp_callback,
     StreamDataCountersCallback* rtp_callback)
     : ssrc_(ssrc),
       clock_(clock),
       incoming_bitrate_(kStatisticsProcessIntervalMs,
                         RateStatistics::kBpsScale),
-      max_reordering_threshold_(kDefaultMaxReorderingThreshold),
+      max_reordering_threshold_(max_reordering_threshold),
       enable_retransmit_detection_(enable_retransmit_detection),
       jitter_q4_(0),
       cumulative_loss_(0),
@@ -48,7 +50,6 @@
       received_seq_first_(0),
       received_seq_max_(0),
       received_seq_wraps_(0),
-      received_packet_overhead_(12),
       last_report_inorder_packets_(0),
       last_report_old_packets_(0),
       last_report_seq_max_(0),
@@ -57,79 +58,71 @@
 
 StreamStatisticianImpl::~StreamStatisticianImpl() = default;
 
-void StreamStatisticianImpl::IncomingPacket(const RTPHeader& header,
-                                            size_t packet_length) {
-  StreamDataCounters counters;
-  {
-    rtc::CritScope cs(&stream_lock_);
-
-    bool retransmitted =
-        enable_retransmit_detection_ && IsRetransmitOfOldPacket(header);
-    counters = UpdateCounters(header, packet_length, retransmitted);
-  }
-  rtp_callback_->DataCountersUpdated(counters, ssrc_);
+void StreamStatisticianImpl::OnRtpPacket(const RtpPacketReceived& packet) {
+  StreamDataCounters counters = UpdateCounters(packet);
+  if (rtp_callback_)
+    rtp_callback_->DataCountersUpdated(counters, ssrc_);
 }
 
 StreamDataCounters StreamStatisticianImpl::UpdateCounters(
-    const RTPHeader& header,
-    size_t packet_length,
-    bool retransmitted) {
-  bool in_order = InOrderPacketInternal(header.sequenceNumber);
-  RTC_DCHECK_EQ(ssrc_, header.ssrc);
-  incoming_bitrate_.Update(packet_length, clock_->TimeInMilliseconds());
-  receive_counters_.transmitted.AddPacket(packet_length, header);
-  if (!in_order && retransmitted) {
-    receive_counters_.retransmitted.AddPacket(packet_length, header);
+    const RtpPacketReceived& packet) {
+  rtc::CritScope cs(&stream_lock_);
+  RTC_DCHECK_EQ(ssrc_, packet.Ssrc());
+  uint16_t sequence_number = packet.SequenceNumber();
+  bool in_order =
+      // First packet is always in order.
+      last_receive_time_ms_ == 0 ||
+      IsNewerSequenceNumber(sequence_number, received_seq_max_) ||
+      // If we have a restart of the remote side this packet is still in order.
+      !IsNewerSequenceNumber(sequence_number,
+                             received_seq_max_ - max_reordering_threshold_);
+  int64_t now_ms = clock_->TimeInMilliseconds();
+
+  incoming_bitrate_.Update(packet.size(), now_ms);
+  receive_counters_.transmitted.AddPacket(packet);
+  if (!in_order && enable_retransmit_detection_ &&
+      IsRetransmitOfOldPacket(packet, now_ms)) {
+    receive_counters_.retransmitted.AddPacket(packet);
   }
 
   if (receive_counters_.transmitted.packets == 1) {
-    received_seq_first_ = header.sequenceNumber;
-    receive_counters_.first_packet_time_ms = clock_->TimeInMilliseconds();
+    received_seq_first_ = packet.SequenceNumber();
+    receive_counters_.first_packet_time_ms = now_ms;
   }
 
   // Count only the new packets received. That is, if packets 1, 2, 3, 5, 4, 6
   // are received, 4 will be ignored.
   if (in_order) {
-    // Current time in samples.
-    NtpTime receive_time = clock_->CurrentNtpTime();
-
     // Wrong if we use RetransmitOfOldPacket.
     if (receive_counters_.transmitted.packets > 1 &&
-        received_seq_max_ > header.sequenceNumber) {
+        received_seq_max_ > packet.SequenceNumber()) {
       // Wrap around detected.
       received_seq_wraps_++;
     }
     // New max.
-    received_seq_max_ = header.sequenceNumber;
+    received_seq_max_ = packet.SequenceNumber();
 
     // If new time stamp and more than one in-order packet received, calculate
     // new jitter statistics.
-    if (header.timestamp != last_received_timestamp_ &&
+    if (packet.Timestamp() != last_received_timestamp_ &&
         (receive_counters_.transmitted.packets -
          receive_counters_.retransmitted.packets) > 1) {
-      UpdateJitter(header, receive_time);
+      UpdateJitter(packet, now_ms);
     }
-    last_received_timestamp_ = header.timestamp;
-    last_receive_time_ntp_ = receive_time;
-    last_receive_time_ms_ = clock_->TimeInMilliseconds();
+    last_received_timestamp_ = packet.Timestamp();
+    last_receive_time_ms_ = now_ms;
   }
-
-  size_t packet_oh = header.headerLength + header.paddingLength;
-
-  // Our measured overhead. Filter from RFC 5104 4.2.1.2:
-  // avg_OH (new) = 15/16*avg_OH (old) + 1/16*pckt_OH,
-  received_packet_overhead_ = (15 * received_packet_overhead_ + packet_oh) >> 4;
   return receive_counters_;
 }
 
-void StreamStatisticianImpl::UpdateJitter(const RTPHeader& header,
-                                          NtpTime receive_time) {
-  uint32_t receive_time_rtp =
-      NtpToRtp(receive_time, header.payload_type_frequency);
-  uint32_t last_receive_time_rtp =
-      NtpToRtp(last_receive_time_ntp_, header.payload_type_frequency);
-  int32_t time_diff_samples = (receive_time_rtp - last_receive_time_rtp) -
-                              (header.timestamp - last_received_timestamp_);
+void StreamStatisticianImpl::UpdateJitter(const RtpPacketReceived& packet,
+                                          int64_t receive_time_ms) {
+  int64_t receive_diff_ms = receive_time_ms - last_receive_time_ms_;
+  RTC_DCHECK_GE(receive_diff_ms, 0);
+  uint32_t receive_diff_rtp = static_cast<uint32_t>(
+      (receive_diff_ms * packet.payload_type_frequency()) / 1000);
+  int32_t time_diff_samples =
+      receive_diff_rtp - (packet.Timestamp() - last_received_timestamp_);
 
   time_diff_samples = std::abs(time_diff_samples);
 
@@ -143,15 +136,16 @@
   }
 }
 
-void StreamStatisticianImpl::FecPacketReceived(const RTPHeader& header,
-                                               size_t packet_length) {
+void StreamStatisticianImpl::FecPacketReceived(
+    const RtpPacketReceived& packet) {
   StreamDataCounters counters;
   {
     rtc::CritScope cs(&stream_lock_);
-    receive_counters_.fec.AddPacket(packet_length, header);
+    receive_counters_.fec.AddPacket(packet);
     counters = receive_counters_;
   }
-  rtp_callback_->DataCountersUpdated(counters, ssrc_);
+  if (rtp_callback_)
+    rtp_callback_->DataCountersUpdated(counters, ssrc_);
 }
 
 void StreamStatisticianImpl::SetMaxReorderingThreshold(
@@ -188,7 +182,8 @@
     *statistics = CalculateRtcpStatistics();
   }
 
-  rtcp_callback_->StatisticsUpdated(*statistics, ssrc_);
+  if (rtcp_callback_)
+    rtcp_callback_->StatisticsUpdated(*statistics, ssrc_);
   return true;
 }
 
@@ -196,7 +191,7 @@
     RtcpStatistics* statistics) {
   {
     rtc::CritScope cs(&stream_lock_);
-    if (clock_->CurrentNtpInMilliseconds() - last_receive_time_ntp_.ToMs() >=
+    if (clock_->TimeInMilliseconds() - last_receive_time_ms_ >=
         kStatisticsTimeoutMs) {
       // Not active.
       return false;
@@ -210,7 +205,8 @@
     *statistics = CalculateRtcpStatistics();
   }
 
-  rtcp_callback_->StatisticsUpdated(*statistics, ssrc_);
+  if (rtcp_callback_)
+    rtcp_callback_->StatisticsUpdated(*statistics, ssrc_);
   return true;
 }
 
@@ -312,17 +308,15 @@
 }
 
 bool StreamStatisticianImpl::IsRetransmitOfOldPacket(
-    const RTPHeader& header) const {
-  if (InOrderPacketInternal(header.sequenceNumber)) {
-    return false;
-  }
-  uint32_t frequency_khz = header.payload_type_frequency / 1000;
-  assert(frequency_khz > 0);
+    const RtpPacketReceived& packet,
+    int64_t now_ms) const {
+  uint32_t frequency_khz = packet.payload_type_frequency() / 1000;
+  RTC_DCHECK_GT(frequency_khz, 0);
 
-  int64_t time_diff_ms = clock_->TimeInMilliseconds() - last_receive_time_ms_;
+  int64_t time_diff_ms = now_ms - last_receive_time_ms_;
 
   // Diff in time stamp since last received in order.
-  uint32_t timestamp_diff = header.timestamp - last_received_timestamp_;
+  uint32_t timestamp_diff = packet.Timestamp() - last_received_timestamp_;
   uint32_t rtp_time_stamp_diff_ms = timestamp_diff / frequency_khz;
 
   int64_t max_delay_ms = 0;
@@ -341,30 +335,23 @@
   return time_diff_ms > rtp_time_stamp_diff_ms + max_delay_ms;
 }
 
-bool StreamStatisticianImpl::InOrderPacketInternal(
-    uint16_t sequence_number) const {
-  // First packet is always in order.
-  if (last_receive_time_ms_ == 0)
-    return true;
-
-  if (IsNewerSequenceNumber(sequence_number, received_seq_max_)) {
-    return true;
-  } else {
-    // If we have a restart of the remote side this packet is still in order.
-    return !IsNewerSequenceNumber(
-        sequence_number, received_seq_max_ - max_reordering_threshold_);
-  }
+std::unique_ptr<ReceiveStatistics> ReceiveStatistics::Create(
+    Clock* clock,
+    RtcpStatisticsCallback* rtcp_callback,
+    StreamDataCountersCallback* rtp_callback) {
+  return absl::make_unique<ReceiveStatisticsImpl>(clock, rtcp_callback,
+                                                  rtp_callback);
 }
 
-ReceiveStatistics* ReceiveStatistics::Create(Clock* clock) {
-  return new ReceiveStatisticsImpl(clock);
-}
-
-ReceiveStatisticsImpl::ReceiveStatisticsImpl(Clock* clock)
+ReceiveStatisticsImpl::ReceiveStatisticsImpl(
+    Clock* clock,
+    RtcpStatisticsCallback* rtcp_callback,
+    StreamDataCountersCallback* rtp_callback)
     : clock_(clock),
       last_returned_ssrc_(0),
-      rtcp_stats_callback_(NULL),
-      rtp_stats_callback_(NULL) {}
+      max_reordering_threshold_(kDefaultMaxReorderingThreshold),
+      rtcp_stats_callback_(rtcp_callback),
+      rtp_stats_callback_(rtp_callback) {}
 
 ReceiveStatisticsImpl::~ReceiveStatisticsImpl() {
   while (!statisticians_.empty()) {
@@ -374,31 +361,24 @@
 }
 
 void ReceiveStatisticsImpl::OnRtpPacket(const RtpPacketReceived& packet) {
-  RTPHeader header;
-  packet.GetHeader(&header);
-  IncomingPacket(header, packet.size());
-}
-
-void ReceiveStatisticsImpl::IncomingPacket(const RTPHeader& header,
-                                           size_t packet_length) {
   StreamStatisticianImpl* impl;
   {
     rtc::CritScope cs(&receive_statistics_lock_);
-    auto it = statisticians_.find(header.ssrc);
+    auto it = statisticians_.find(packet.Ssrc());
     if (it != statisticians_.end()) {
       impl = it->second;
     } else {
       impl = new StreamStatisticianImpl(
-          header.ssrc, clock_, /* enable_retransmit_detection = */ false, this,
-          this);
-      statisticians_[header.ssrc] = impl;
+          packet.Ssrc(), clock_, /* enable_retransmit_detection = */ false,
+          max_reordering_threshold_, rtcp_stats_callback_, rtp_stats_callback_);
+      statisticians_[packet.Ssrc()] = impl;
     }
   }
   // StreamStatisticianImpl instance is created once and only destroyed when
   // this whole ReceiveStatisticsImpl is destroyed. StreamStatisticianImpl has
   // it's own locking so don't hold receive_statistics_lock_ (potential
   // deadlock).
-  impl->IncomingPacket(header, packet_length);
+  impl->OnRtpPacket(packet);
 }
 
 void ReceiveStatisticsImpl::FecPacketReceived(const RtpPacketReceived& packet) {
@@ -411,9 +391,7 @@
       return;
     impl = it->second;
   }
-  RTPHeader header;
-  packet.GetHeader(&header);
-  impl->FecPacketReceived(header, packet.size());
+  impl->FecPacketReceived(packet);
 }
 
 StreamStatistician* ReceiveStatisticsImpl::GetStatistician(
@@ -427,8 +405,13 @@
 
 void ReceiveStatisticsImpl::SetMaxReorderingThreshold(
     int max_reordering_threshold) {
-  rtc::CritScope cs(&receive_statistics_lock_);
-  for (auto& statistician : statisticians_) {
+  std::map<uint32_t, StreamStatisticianImpl*> statisticians;
+  {
+    rtc::CritScope cs(&receive_statistics_lock_);
+    max_reordering_threshold_ = max_reordering_threshold;
+    statisticians = statisticians_;
+  }
+  for (auto& statistician : statisticians) {
     statistician.second->SetMaxReorderingThreshold(max_reordering_threshold);
   }
 }
@@ -440,7 +423,9 @@
     rtc::CritScope cs(&receive_statistics_lock_);
     StreamStatisticianImpl*& impl_ref = statisticians_[ssrc];
     if (impl_ref == nullptr) {  // new element
-      impl_ref = new StreamStatisticianImpl(ssrc, clock_, enable, this, this);
+      impl_ref = new StreamStatisticianImpl(
+          ssrc, clock_, enable, max_reordering_threshold_, rtcp_stats_callback_,
+          rtp_stats_callback_);
       return;
     }
     impl = impl_ref;
@@ -448,43 +433,6 @@
   impl->EnableRetransmitDetection(enable);
 }
 
-void ReceiveStatisticsImpl::RegisterRtcpStatisticsCallback(
-    RtcpStatisticsCallback* callback) {
-  rtc::CritScope cs(&receive_statistics_lock_);
-  if (callback != NULL)
-    assert(rtcp_stats_callback_ == NULL);
-  rtcp_stats_callback_ = callback;
-}
-
-void ReceiveStatisticsImpl::StatisticsUpdated(const RtcpStatistics& statistics,
-                                              uint32_t ssrc) {
-  rtc::CritScope cs(&receive_statistics_lock_);
-  if (rtcp_stats_callback_)
-    rtcp_stats_callback_->StatisticsUpdated(statistics, ssrc);
-}
-
-void ReceiveStatisticsImpl::CNameChanged(const char* cname, uint32_t ssrc) {
-  rtc::CritScope cs(&receive_statistics_lock_);
-  if (rtcp_stats_callback_)
-    rtcp_stats_callback_->CNameChanged(cname, ssrc);
-}
-
-void ReceiveStatisticsImpl::RegisterRtpStatisticsCallback(
-    StreamDataCountersCallback* callback) {
-  rtc::CritScope cs(&receive_statistics_lock_);
-  if (callback != NULL)
-    assert(rtp_stats_callback_ == NULL);
-  rtp_stats_callback_ = callback;
-}
-
-void ReceiveStatisticsImpl::DataCountersUpdated(const StreamDataCounters& stats,
-                                                uint32_t ssrc) {
-  rtc::CritScope cs(&receive_statistics_lock_);
-  if (rtp_stats_callback_) {
-    rtp_stats_callback_->DataCountersUpdated(stats, ssrc);
-  }
-}
-
 std::vector<rtcp::ReportBlock> ReceiveStatisticsImpl::RtcpReportBlocks(
     size_t max_blocks) {
   std::map<uint32_t, StreamStatisticianImpl*> statisticians;
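
Note on the receive-statistics change above: the RTCP/RTP observers move from post-construction Register* calls to constructor arguments, and packets are fed in as parsed RtpPacketReceived objects. A minimal usage sketch of the new surface (include paths and the wrapper function are illustrative, not part of the diff):

  #include <memory>

  #include "modules/rtp_rtcp/include/receive_statistics.h"
  #include "modules/rtp_rtcp/source/rtp_packet_received.h"

  void CountReceivedPacket(webrtc::Clock* clock,
                           const webrtc::RtpPacketReceived& packet) {
    // Callbacks are now fixed for the lifetime of the object; pass nullptr
    // when no observer is needed, as the updated unit tests do.
    std::unique_ptr<webrtc::ReceiveStatistics> stats =
        webrtc::ReceiveStatistics::Create(clock, /*rtcp_callback=*/nullptr,
                                          /*rtp_callback=*/nullptr);
    stats->OnRtpPacket(packet);  // Replaces IncomingPacket(header, size).
    // FEC packets are only counted for an SSRC already seen via OnRtpPacket();
    // otherwise the early return in FecPacketReceived() drops them silently.
    stats->FecPacketReceived(packet);
  }
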
diff --git a/modules/rtp_rtcp/source/receive_statistics_impl.h b/modules/rtp_rtcp/source/receive_statistics_impl.h
index 56bfd2b..8153c44 100644
--- a/modules/rtp_rtcp/source/receive_statistics_impl.h
+++ b/modules/rtp_rtcp/source/receive_statistics_impl.h
@@ -19,15 +19,17 @@
 
 #include "rtc_base/criticalsection.h"
 #include "rtc_base/rate_statistics.h"
-#include "system_wrappers/include/ntp_time.h"
+#include "rtc_base/thread_annotations.h"
 
 namespace webrtc {
 
-class StreamStatisticianImpl : public StreamStatistician {
+class StreamStatisticianImpl : public StreamStatistician,
+                               public RtpPacketSinkInterface {
  public:
   StreamStatisticianImpl(uint32_t ssrc,
                          Clock* clock,
                          bool enable_retransmit_detection,
+                         int max_reordering_threshold,
                          RtcpStatisticsCallback* rtcp_callback,
                          StreamDataCountersCallback* rtp_callback);
   ~StreamStatisticianImpl() override;
@@ -41,24 +43,22 @@
       StreamDataCounters* data_counters) const override;
   uint32_t BitrateReceived() const override;
 
-  void IncomingPacket(const RTPHeader& rtp_header, size_t packet_length);
-  void FecPacketReceived(const RTPHeader& header, size_t packet_length);
+  // Implements RtpPacketSinkInterface
+  void OnRtpPacket(const RtpPacketReceived& packet) override;
+
+  void FecPacketReceived(const RtpPacketReceived& packet);
   void SetMaxReorderingThreshold(int max_reordering_threshold);
   void EnableRetransmitDetection(bool enable);
 
  private:
-  bool IsRetransmitOfOldPacket(const RTPHeader& header) const
-      RTC_EXCLUSIVE_LOCKS_REQUIRED(stream_lock_);
-  bool InOrderPacketInternal(uint16_t sequence_number) const
+  bool IsRetransmitOfOldPacket(const RtpPacketReceived& packet,
+                               int64_t now_ms) const
       RTC_EXCLUSIVE_LOCKS_REQUIRED(stream_lock_);
   RtcpStatistics CalculateRtcpStatistics()
       RTC_EXCLUSIVE_LOCKS_REQUIRED(stream_lock_);
-  void UpdateJitter(const RTPHeader& header, NtpTime receive_time)
+  void UpdateJitter(const RtpPacketReceived& packet, int64_t receive_time_ms)
       RTC_EXCLUSIVE_LOCKS_REQUIRED(stream_lock_);
-  StreamDataCounters UpdateCounters(const RTPHeader& rtp_header,
-                                    size_t packet_length,
-                                    bool retransmitted)
-      RTC_EXCLUSIVE_LOCKS_REQUIRED(stream_lock_);
+  StreamDataCounters UpdateCounters(const RtpPacketReceived& packet);
 
   const uint32_t ssrc_;
   Clock* const clock_;
@@ -73,14 +73,12 @@
   uint32_t cumulative_loss_ RTC_GUARDED_BY(&stream_lock_);
 
   int64_t last_receive_time_ms_ RTC_GUARDED_BY(&stream_lock_);
-  NtpTime last_receive_time_ntp_ RTC_GUARDED_BY(&stream_lock_);
   uint32_t last_received_timestamp_ RTC_GUARDED_BY(&stream_lock_);
   uint16_t received_seq_first_ RTC_GUARDED_BY(&stream_lock_);
   uint16_t received_seq_max_ RTC_GUARDED_BY(&stream_lock_);
   uint16_t received_seq_wraps_ RTC_GUARDED_BY(&stream_lock_);
 
   // Current counter values.
-  size_t received_packet_overhead_ RTC_GUARDED_BY(&stream_lock_);
   StreamDataCounters receive_counters_ RTC_GUARDED_BY(&stream_lock_);
 
   // Counter values when we sent the last report.
@@ -94,47 +92,36 @@
   StreamDataCountersCallback* const rtp_callback_;
 };
 
-class ReceiveStatisticsImpl : public ReceiveStatistics,
-                              public RtcpStatisticsCallback,
-                              public StreamDataCountersCallback {
+class ReceiveStatisticsImpl : public ReceiveStatistics {
  public:
-  explicit ReceiveStatisticsImpl(Clock* clock);
+  ReceiveStatisticsImpl(Clock* clock,
+                        RtcpStatisticsCallback* rtcp_callback,
+                        StreamDataCountersCallback* rtp_callback);
 
   ~ReceiveStatisticsImpl() override;
 
-  // Implement ReceiveStatisticsProvider.
+  // Implements ReceiveStatisticsProvider.
   std::vector<rtcp::ReportBlock> RtcpReportBlocks(size_t max_blocks) override;
 
-  // Implement RtpPacketSinkInterface
+  // Implements RtpPacketSinkInterface
   void OnRtpPacket(const RtpPacketReceived& packet) override;
 
-  // Implement ReceiveStatistics.
-  void IncomingPacket(const RTPHeader& header, size_t packet_length) override;
+  // Implements ReceiveStatistics.
   void FecPacketReceived(const RtpPacketReceived& packet) override;
   StreamStatistician* GetStatistician(uint32_t ssrc) const override;
   void SetMaxReorderingThreshold(int max_reordering_threshold) override;
   void EnableRetransmitDetection(uint32_t ssrc, bool enable) override;
 
-  void RegisterRtcpStatisticsCallback(
-      RtcpStatisticsCallback* callback) override;
-
-  void RegisterRtpStatisticsCallback(
-      StreamDataCountersCallback* callback) override;
-
  private:
-  void StatisticsUpdated(const RtcpStatistics& statistics,
-                         uint32_t ssrc) override;
-  void CNameChanged(const char* cname, uint32_t ssrc) override;
-  void DataCountersUpdated(const StreamDataCounters& counters,
-                           uint32_t ssrc) override;
-
   Clock* const clock_;
   rtc::CriticalSection receive_statistics_lock_;
   uint32_t last_returned_ssrc_;
-  std::map<uint32_t, StreamStatisticianImpl*> statisticians_;
+  int max_reordering_threshold_ RTC_GUARDED_BY(receive_statistics_lock_);
+  std::map<uint32_t, StreamStatisticianImpl*> statisticians_
+      RTC_GUARDED_BY(receive_statistics_lock_);
 
-  RtcpStatisticsCallback* rtcp_stats_callback_;
-  StreamDataCountersCallback* rtp_stats_callback_;
+  RtcpStatisticsCallback* const rtcp_stats_callback_;
+  StreamDataCountersCallback* const rtp_stats_callback_;
 };
 }  // namespace webrtc
 #endif  // MODULES_RTP_RTCP_SOURCE_RECEIVE_STATISTICS_IMPL_H_
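
With RegisterRtcpStatisticsCallback()/RegisterRtpStatisticsCallback() gone from the interface, an observer has to be supplied up front. A hedged sketch of such an observer, using the callback signatures that the deleted forwarding methods implemented; the class name is illustrative:

  // Illustrative observer; the pointer is stored as a const member, so it
  // must outlive the ReceiveStatistics instance it is passed to.
  class LoggingRtcpObserver : public webrtc::RtcpStatisticsCallback {
   public:
    void StatisticsUpdated(const webrtc::RtcpStatistics& statistics,
                           uint32_t ssrc) override {
      // e.g. track statistics.packets_lost and statistics.jitter per SSRC.
    }
    void CNameChanged(const char* cname, uint32_t ssrc) override {}
  };

  // auto stats = webrtc::ReceiveStatistics::Create(clock, &observer,
  //                                                /*rtp_callback=*/nullptr);
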
diff --git a/modules/rtp_rtcp/source/receive_statistics_unittest.cc b/modules/rtp_rtcp/source/receive_statistics_unittest.cc
index 578d81f..2539363 100644
--- a/modules/rtp_rtcp/source/receive_statistics_unittest.cc
+++ b/modules/rtp_rtcp/source/receive_statistics_unittest.cc
@@ -71,7 +71,9 @@
 class ReceiveStatisticsTest : public ::testing::Test {
  public:
   ReceiveStatisticsTest()
-      : clock_(0), receive_statistics_(ReceiveStatistics::Create(&clock_)) {
+      : clock_(0),
+        receive_statistics_(
+            ReceiveStatistics::Create(&clock_, nullptr, nullptr)) {
     packet1_ = CreateRtpPacket(kSsrc1, kPacketSize1);
     packet2_ = CreateRtpPacket(kSsrc2, kPacketSize2);
   }
@@ -251,7 +253,7 @@
     RtcpStatistics stats_;
   } callback;
 
-  receive_statistics_->RegisterRtcpStatisticsCallback(&callback);
+  receive_statistics_ = ReceiveStatistics::Create(&clock_, &callback, nullptr);
   receive_statistics_->EnableRetransmitDetection(kSsrc1, true);
 
   // Add some arbitrary data, with loss and jitter.
@@ -291,33 +293,6 @@
   EXPECT_EQ(1, statistics.packets_lost);
   EXPECT_EQ(5u, statistics.extended_highest_sequence_number);
   EXPECT_EQ(177u, statistics.jitter);
-
-  receive_statistics_->RegisterRtcpStatisticsCallback(NULL);
-
-  // Add some more data.
-  packet1_.SetSequenceNumber(1);
-  clock_.AdvanceTimeMilliseconds(7);
-  IncrementTimestamp(&packet1_, 3);
-  receive_statistics_->OnRtpPacket(packet1_);
-  IncrementSequenceNumber(&packet1_, 2);
-  clock_.AdvanceTimeMilliseconds(9);
-  IncrementTimestamp(&packet1_, 9);
-  receive_statistics_->OnRtpPacket(packet1_);
-  IncrementSequenceNumber(&packet1_, -1);
-  clock_.AdvanceTimeMilliseconds(13);
-  IncrementTimestamp(&packet1_, 47);
-  receive_statistics_->OnRtpPacket(packet1_);
-  IncrementSequenceNumber(&packet1_, 3);
-  clock_.AdvanceTimeMilliseconds(11);
-  IncrementTimestamp(&packet1_, 17);
-  receive_statistics_->OnRtpPacket(packet1_);
-  IncrementSequenceNumber(&packet1_);
-
-  receive_statistics_->GetStatistician(kSsrc1)->GetStatistics(&statistics,
-                                                              true);
-
-  // Should not have been called after deregister.
-  EXPECT_EQ(1u, callback.num_calls_);
 }
 
 class RtpTestCallback : public StreamDataCountersCallback {
@@ -358,7 +333,7 @@
 
 TEST_F(ReceiveStatisticsTest, RtpCallbacks) {
   RtpTestCallback callback;
-  receive_statistics_->RegisterRtpStatisticsCallback(&callback);
+  receive_statistics_ = ReceiveStatistics::Create(&clock_, nullptr, &callback);
   receive_statistics_->EnableRetransmitDetection(kSsrc1, true);
 
   const size_t kHeaderLength = 20;
@@ -417,19 +392,11 @@
   expected.fec.header_bytes = kHeaderLength;
   expected.fec.packets = 1;
   callback.Matches(5, kSsrc1, expected);
-
-  receive_statistics_->RegisterRtpStatisticsCallback(NULL);
-
-  // New stats, but callback should not be called.
-  IncrementSequenceNumber(&packet1);
-  clock_.AdvanceTimeMilliseconds(5);
-  receive_statistics_->OnRtpPacket(packet1);
-  callback.Matches(5, kSsrc1, expected);
 }
 
 TEST_F(ReceiveStatisticsTest, RtpCallbacksFecFirst) {
   RtpTestCallback callback;
-  receive_statistics_->RegisterRtpStatisticsCallback(&callback);
+  receive_statistics_ = ReceiveStatistics::Create(&clock_, nullptr, &callback);
 
   const uint32_t kHeaderLength = 20;
   RtpPacketReceived packet =
diff --git a/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc b/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc
index 5254cd5..b301461 100644
--- a/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc
+++ b/modules/rtp_rtcp/source/remote_ntp_time_estimator_unittest.cc
@@ -56,7 +56,8 @@
                               int64_t networking_delay_ms) {
     uint32_t rtcp_timestamp = GetRemoteTimestamp();
     int64_t ntp_error_fractions =
-        ntp_error_ms * NtpTime::kFractionsPerSecond / 1000;
+        ntp_error_ms * static_cast<int64_t>(NtpTime::kFractionsPerSecond) /
+        1000;
     NtpTime ntp(static_cast<uint64_t>(remote_clock_.CurrentNtpTime()) +
                 ntp_error_fractions);
     AdvanceTimeMilliseconds(kTestRtt / 2 + networking_delay_ms);
@@ -110,7 +111,17 @@
 }
 
 TEST_F(RemoteNtpTimeEstimatorTest, AveragesErrorsOut) {
-  // Remote peer sends first 5 RTCP SR without errors.
+  // Remote peer sends first 10 RTCP SR without errors.
+  AdvanceTimeMilliseconds(1000);
+  SendRtcpSr();
+  AdvanceTimeMilliseconds(1000);
+  SendRtcpSr();
+  AdvanceTimeMilliseconds(1000);
+  SendRtcpSr();
+  AdvanceTimeMilliseconds(1000);
+  SendRtcpSr();
+  AdvanceTimeMilliseconds(1000);
+  SendRtcpSr();
   AdvanceTimeMilliseconds(1000);
   SendRtcpSr();
   AdvanceTimeMilliseconds(1000);
@@ -122,18 +133,17 @@
   AdvanceTimeMilliseconds(1000);
   SendRtcpSr();
 
-  AdvanceTimeMilliseconds(15);
+  AdvanceTimeMilliseconds(150);
   uint32_t rtp_timestamp = GetRemoteTimestamp();
   int64_t capture_ntp_time_ms = local_clock_.CurrentNtpInMilliseconds();
-
   // Local peer gets enough RTCP SR to calculate the capture time.
   EXPECT_EQ(capture_ntp_time_ms, estimator_->Estimate(rtp_timestamp));
 
   // Remote sends corrupted RTCP SRs
   AdvanceTimeMilliseconds(1000);
-  SendRtcpSrInaccurately(10, 10);
+  SendRtcpSrInaccurately(/*ntp_error_ms=*/2, /*networking_delay_ms=*/-1);
   AdvanceTimeMilliseconds(1000);
-  SendRtcpSrInaccurately(-20, 5);
+  SendRtcpSrInaccurately(/*ntp_error_ms=*/-2, /*networking_delay_ms=*/1);
 
   // New RTP packet to estimate timestamp.
   AdvanceTimeMilliseconds(150);
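
The static_cast added above keeps the millisecond-to-NTP-fraction conversion in signed 64-bit arithmetic, which matters now that the test injects negative error values; presumably kFractionsPerSecond is an unsigned 64-bit constant, so without the cast the whole expression would be promoted to unsigned. A small sketch of the conversion; the helper name and the 2^32 value of kFractionsPerSecond are assumptions:

  // Sketch only: ms of NTP error expressed in NTP fractions, assuming
  // NtpTime::kFractionsPerSecond is 2^32 (standard NTP fraction resolution).
  int64_t NtpErrorMsToFractions(int64_t error_ms) {
    return error_ms *
           static_cast<int64_t>(webrtc::NtpTime::kFractionsPerSecond) / 1000;
  }
  // NtpErrorMsToFractions(-2) == -2 * 4294967296 / 1000 == -8589934
  // (truncated toward zero), i.e. the sign of the injected error is preserved.
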
diff --git a/modules/rtp_rtcp/source/rtcp_receiver.cc b/modules/rtp_rtcp/source/rtcp_receiver.cc
index 3635c4a..383f785 100644
--- a/modules/rtp_rtcp/source/rtcp_receiver.cc
+++ b/modules/rtp_rtcp/source/rtcp_receiver.cc
@@ -131,6 +131,7 @@
     RtcpIntraFrameObserver* rtcp_intra_frame_observer,
     TransportFeedbackObserver* transport_feedback_observer,
     VideoBitrateAllocationObserver* bitrate_allocation_observer,
+    int report_interval_ms,
     ModuleRtpRtcp* owner)
     : clock_(clock),
       receiver_only_(receiver_only),
@@ -139,6 +140,7 @@
       rtcp_intra_frame_observer_(rtcp_intra_frame_observer),
       transport_feedback_observer_(transport_feedback_observer),
       bitrate_allocation_observer_(bitrate_allocation_observer),
+      report_interval_ms_(report_interval_ms),
       main_ssrc_(0),
       remote_ssrc_(0),
       remote_sender_rtp_time_(0),
@@ -561,12 +563,12 @@
   return &it->second;
 }
 
-bool RTCPReceiver::RtcpRrTimeout(int64_t rtcp_interval_ms) {
+bool RTCPReceiver::RtcpRrTimeout() {
   rtc::CritScope lock(&rtcp_receiver_lock_);
   if (last_received_rb_ms_ == 0)
     return false;
 
-  int64_t time_out_ms = kRrTimeoutIntervals * rtcp_interval_ms;
+  int64_t time_out_ms = kRrTimeoutIntervals * report_interval_ms_;
   if (clock_->TimeInMilliseconds() > last_received_rb_ms_ + time_out_ms) {
     // Reset the timer to only trigger one log.
     last_received_rb_ms_ = 0;
@@ -575,12 +577,12 @@
   return false;
 }
 
-bool RTCPReceiver::RtcpRrSequenceNumberTimeout(int64_t rtcp_interval_ms) {
+bool RTCPReceiver::RtcpRrSequenceNumberTimeout() {
   rtc::CritScope lock(&rtcp_receiver_lock_);
   if (last_increased_sequence_number_ms_ == 0)
     return false;
 
-  int64_t time_out_ms = kRrTimeoutIntervals * rtcp_interval_ms;
+  int64_t time_out_ms = kRrTimeoutIntervals * report_interval_ms_;
   if (clock_->TimeInMilliseconds() >
       last_increased_sequence_number_ms_ + time_out_ms) {
     // Reset the timer to only trigger one log.
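
With the report interval fixed at construction, the timeout horizon above becomes kRrTimeoutIntervals * report_interval_ms_. A worked example matching the unit test further down:

  // With report_interval_ms_ = 1000 (the unit-test value) and
  // kRrTimeoutIntervals assumed to be 3 (the multiple the test advances by):
  //   time_out_ms = 3 * 1000 = 3000
  // RtcpRrTimeout() first returns true once TimeInMilliseconds() exceeds
  // last_received_rb_ms_ + 3000, and then only once, because
  // last_received_rb_ms_ is reset to 0 to suppress repeated triggers.
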
diff --git a/modules/rtp_rtcp/source/rtcp_receiver.h b/modules/rtp_rtcp/source/rtcp_receiver.h
index a863cae..be4c70e 100644
--- a/modules/rtp_rtcp/source/rtcp_receiver.h
+++ b/modules/rtp_rtcp/source/rtcp_receiver.h
@@ -17,6 +17,7 @@
 #include <string>
 #include <vector>
 
+#include "modules/rtp_rtcp/include/rtcp_statistics.h"
 #include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
 #include "modules/rtp_rtcp/source/rtcp_nack_stats.h"
 #include "modules/rtp_rtcp/source/rtcp_packet/dlrr.h"
@@ -56,6 +57,7 @@
                RtcpIntraFrameObserver* rtcp_intra_frame_observer,
                TransportFeedbackObserver* transport_feedback_observer,
                VideoBitrateAllocationObserver* bitrate_allocation_observer,
+               int report_interval_ms,
                ModuleRtpRtcp* owner);
   virtual ~RTCPReceiver();
 
@@ -94,13 +96,13 @@
 
   // Returns true if we haven't received an RTCP RR for several RTCP
   // intervals, but only triggers true once.
-  bool RtcpRrTimeout(int64_t rtcp_interval_ms);
+  bool RtcpRrTimeout();
 
   // Returns true if we haven't received an RTCP RR telling the receive side
   // has not received RTP packets for too long, i.e. extended highest sequence
   // number hasn't increased for several RTCP intervals. The function only
   // returns true once until a new RR is received.
-  bool RtcpRrSequenceNumberTimeout(int64_t rtcp_interval_ms);
+  bool RtcpRrSequenceNumberTimeout();
 
   std::vector<rtcp::TmmbItem> TmmbrReceived();
   // Return true if new bandwidth should be set.
@@ -215,6 +217,7 @@
   RtcpIntraFrameObserver* const rtcp_intra_frame_observer_;
   TransportFeedbackObserver* const transport_feedback_observer_;
   VideoBitrateAllocationObserver* const bitrate_allocation_observer_;
+  const int report_interval_ms_;
 
   rtc::CriticalSection rtcp_receiver_lock_;
   uint32_t main_ssrc_ RTC_GUARDED_BY(rtcp_receiver_lock_);
diff --git a/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc b/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc
index 4be8d73..a576fdc 100644
--- a/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc
+++ b/modules/rtp_rtcp/source/rtcp_receiver_unittest.cc
@@ -107,6 +107,8 @@
 constexpr uint32_t kNotToUsSsrc = 0x654321;
 constexpr uint32_t kUnknownSenderSsrc = 0x54321;
 
+constexpr int64_t kRtcpIntervalMs = 1000;
+
 }  // namespace
 
 class RtcpReceiverTest : public ::testing::Test {
@@ -120,6 +122,7 @@
                        &intra_frame_observer_,
                        &transport_feedback_observer_,
                        &bitrate_allocation_observer_,
+                       kRtcpIntervalMs,
                        &rtp_rtcp_impl_) {}
   void SetUp() {
     std::set<uint32_t> ssrcs = {kReceiverMainSsrc, kReceiverExtraSsrc};
@@ -941,13 +944,12 @@
 }
 
 TEST_F(RtcpReceiverTest, ReceiveReportTimeout) {
-  const int64_t kRtcpIntervalMs = 1000;
   const uint16_t kSequenceNumber = 1234;
   system_clock_.AdvanceTimeMilliseconds(3 * kRtcpIntervalMs);
 
   // No RR received, shouldn't trigger a timeout.
-  EXPECT_FALSE(rtcp_receiver_.RtcpRrTimeout(kRtcpIntervalMs));
-  EXPECT_FALSE(rtcp_receiver_.RtcpRrSequenceNumberTimeout(kRtcpIntervalMs));
+  EXPECT_FALSE(rtcp_receiver_.RtcpRrTimeout());
+  EXPECT_FALSE(rtcp_receiver_.RtcpRrSequenceNumberTimeout());
 
   // Add a RR and advance the clock just enough to not trigger a timeout.
   rtcp::ReportBlock rb1;
@@ -962,8 +964,8 @@
   InjectRtcpPacket(rr1);
 
   system_clock_.AdvanceTimeMilliseconds(3 * kRtcpIntervalMs - 1);
-  EXPECT_FALSE(rtcp_receiver_.RtcpRrTimeout(kRtcpIntervalMs));
-  EXPECT_FALSE(rtcp_receiver_.RtcpRrSequenceNumberTimeout(kRtcpIntervalMs));
+  EXPECT_FALSE(rtcp_receiver_.RtcpRrTimeout());
+  EXPECT_FALSE(rtcp_receiver_.RtcpRrSequenceNumberTimeout());
 
   // Add a RR with the same extended max as the previous RR to trigger a
   // sequence number timeout, but not a RR timeout.
@@ -972,17 +974,17 @@
   InjectRtcpPacket(rr1);
 
   system_clock_.AdvanceTimeMilliseconds(2);
-  EXPECT_FALSE(rtcp_receiver_.RtcpRrTimeout(kRtcpIntervalMs));
-  EXPECT_TRUE(rtcp_receiver_.RtcpRrSequenceNumberTimeout(kRtcpIntervalMs));
+  EXPECT_FALSE(rtcp_receiver_.RtcpRrTimeout());
+  EXPECT_TRUE(rtcp_receiver_.RtcpRrSequenceNumberTimeout());
 
   // Advance clock enough to trigger an RR timeout too.
   system_clock_.AdvanceTimeMilliseconds(3 * kRtcpIntervalMs);
-  EXPECT_TRUE(rtcp_receiver_.RtcpRrTimeout(kRtcpIntervalMs));
+  EXPECT_TRUE(rtcp_receiver_.RtcpRrTimeout());
 
   // We should only get one timeout even though we still haven't received a new
   // RR.
-  EXPECT_FALSE(rtcp_receiver_.RtcpRrTimeout(kRtcpIntervalMs));
-  EXPECT_FALSE(rtcp_receiver_.RtcpRrSequenceNumberTimeout(kRtcpIntervalMs));
+  EXPECT_FALSE(rtcp_receiver_.RtcpRrTimeout());
+  EXPECT_FALSE(rtcp_receiver_.RtcpRrSequenceNumberTimeout());
 
   // Add a new RR with increase sequence number to reset timers.
   rtcp::ReportBlock rb2;
@@ -996,8 +998,8 @@
   EXPECT_CALL(bandwidth_observer_, OnReceivedRtcpReceiverReport(_, _, _));
   InjectRtcpPacket(rr2);
 
-  EXPECT_FALSE(rtcp_receiver_.RtcpRrTimeout(kRtcpIntervalMs));
-  EXPECT_FALSE(rtcp_receiver_.RtcpRrSequenceNumberTimeout(kRtcpIntervalMs));
+  EXPECT_FALSE(rtcp_receiver_.RtcpRrTimeout());
+  EXPECT_FALSE(rtcp_receiver_.RtcpRrSequenceNumberTimeout());
 
   // Verify we can get a timeout again once we've received new RR.
   system_clock_.AdvanceTimeMilliseconds(2 * kRtcpIntervalMs);
@@ -1006,11 +1008,11 @@
   InjectRtcpPacket(rr2);
 
   system_clock_.AdvanceTimeMilliseconds(kRtcpIntervalMs + 1);
-  EXPECT_FALSE(rtcp_receiver_.RtcpRrTimeout(kRtcpIntervalMs));
-  EXPECT_TRUE(rtcp_receiver_.RtcpRrSequenceNumberTimeout(kRtcpIntervalMs));
+  EXPECT_FALSE(rtcp_receiver_.RtcpRrTimeout());
+  EXPECT_TRUE(rtcp_receiver_.RtcpRrSequenceNumberTimeout());
 
   system_clock_.AdvanceTimeMilliseconds(2 * kRtcpIntervalMs);
-  EXPECT_TRUE(rtcp_receiver_.RtcpRrTimeout(kRtcpIntervalMs));
+  EXPECT_TRUE(rtcp_receiver_.RtcpRrTimeout());
 }
 
 TEST_F(RtcpReceiverTest, TmmbrReceivedWithNoIncomingPacket) {
diff --git a/modules/rtp_rtcp/source/rtcp_sender.cc b/modules/rtp_rtcp/source/rtcp_sender.cc
index ac50b32..2581487 100644
--- a/modules/rtp_rtcp/source/rtcp_sender.cc
+++ b/modules/rtp_rtcp/source/rtcp_sender.cc
@@ -10,7 +10,8 @@
 
 #include "modules/rtp_rtcp/source/rtcp_sender.h"
 
-#include <string.h>  // memcpy
+#include <string.h>   // memcpy
+#include <algorithm>  // std::min
 
 #include <utility>
 
@@ -118,14 +119,14 @@
     RtcpPacketTypeCounterObserver* packet_type_counter_observer,
     RtcEventLog* event_log,
     Transport* outgoing_transport,
-    RtcpIntervalConfig interval_config)
+    int report_interval_ms)
     : audio_(audio),
       clock_(clock),
       random_(clock_->TimeInMicroseconds()),
       method_(RtcpMode::kOff),
       event_log_(event_log),
       transport_(outgoing_transport),
-      interval_config_(interval_config),
+      report_interval_ms_(report_interval_ms),
       sending_(false),
       next_time_to_send_rtcp_(0),
       timestamp_offset_(0),
@@ -180,9 +181,8 @@
 
   if (method_ == RtcpMode::kOff && new_method != RtcpMode::kOff) {
     // When switching on, reschedule the next packet
-    int64_t interval_ms = audio_ ? interval_config_.audio_interval_ms
-                                 : interval_config_.video_interval_ms;
-    next_time_to_send_rtcp_ = clock_->TimeInMilliseconds() + (interval_ms / 2);
+    next_time_to_send_rtcp_ =
+        clock_->TimeInMilliseconds() + (report_interval_ms_ / 2);
   }
   method_ = new_method;
 }
@@ -765,28 +765,24 @@
     }
 
     // generate next time to send an RTCP report
-    uint32_t minIntervalMs =
-        rtc::dchecked_cast<uint32_t>(interval_config_.audio_interval_ms);
+    int min_interval_ms = report_interval_ms_;
 
-    if (!audio_) {
-      if (sending_) {
-        // Calculate bandwidth for video; 360 / send bandwidth in kbit/s.
-        uint32_t send_bitrate_kbit = feedback_state.send_bitrate / 1000;
-        if (send_bitrate_kbit != 0)
-          minIntervalMs = 360000 / send_bitrate_kbit;
-      }
-      if (minIntervalMs >
-          rtc::dchecked_cast<uint32_t>(interval_config_.video_interval_ms)) {
-        minIntervalMs =
-            rtc::dchecked_cast<uint32_t>(interval_config_.video_interval_ms);
+    if (!audio_ && sending_) {
+      // Calculate bandwidth for video; 360 / send bandwidth in kbit/s.
+      int send_bitrate_kbit = feedback_state.send_bitrate / 1000;
+      if (send_bitrate_kbit != 0) {
+        min_interval_ms = 360000 / send_bitrate_kbit;
+        min_interval_ms = std::min(min_interval_ms, report_interval_ms_);
       }
     }
 
     // The interval between RTCP packets is varied randomly over the
     // range [1/2,3/2] times the calculated interval.
-    uint32_t timeToNext =
-        random_.Rand(minIntervalMs * 1 / 2, minIntervalMs * 3 / 2);
-    next_time_to_send_rtcp_ = clock_->TimeInMilliseconds() + timeToNext;
+    int time_to_next =
+        random_.Rand(min_interval_ms * 1 / 2, min_interval_ms * 3 / 2);
+
+    RTC_DCHECK_GT(time_to_next, 0);
+    next_time_to_send_rtcp_ = clock_->TimeInMilliseconds() + time_to_next;
 
     // RtcpSender expected to be used for sending either just sender reports
     // or just receiver reports.
@@ -966,12 +962,4 @@
   return packet.Build(max_packet_size, callback) && !send_failure;
 }
 
-int64_t RTCPSender::RtcpAudioReportInverval() const {
-  return interval_config_.audio_interval_ms;
-}
-
-int64_t RTCPSender::RtcpVideoReportInverval() const {
-  return interval_config_.video_interval_ms;
-}
-
 }  // namespace webrtc
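
The rewrite above folds the audio/video interval pair into the single report_interval_ms_ and, for a sending video module, still derives a bandwidth-based interval of 360000 / send_bitrate_kbit ms, now capped by the configured interval via std::min (hence the new <algorithm> include). A worked sketch with illustrative numbers:

  #include <algorithm>

  // Mirrors the interval arithmetic above; numbers are illustrative.
  int NextRtcpIntervalMs(int report_interval_ms, int send_bitrate_kbit) {
    int min_interval_ms = report_interval_ms;
    if (send_bitrate_kbit != 0) {
      min_interval_ms =
          std::min(360000 / send_bitrate_kbit, report_interval_ms);
    }
    return min_interval_ms;  // e.g. 1000 ms config, 2000 kbit/s -> 180 ms.
  }
  // The next compound packet is then scheduled at a random offset in
  // [min_interval_ms / 2, 3 * min_interval_ms / 2], i.e. [90 ms, 270 ms] for
  // the 180 ms example; audio and non-sending modules keep report_interval_ms.
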
diff --git a/modules/rtp_rtcp/source/rtcp_sender.h b/modules/rtp_rtcp/source/rtcp_sender.h
index 2720e0a..0845397 100644
--- a/modules/rtp_rtcp/source/rtcp_sender.h
+++ b/modules/rtp_rtcp/source/rtcp_sender.h
@@ -68,7 +68,7 @@
              RtcpPacketTypeCounterObserver* packet_type_counter_observer,
              RtcEventLog* event_log,
              Transport* outgoing_transport,
-             RtcpIntervalConfig interval_config);
+             int report_interval_ms);
   virtual ~RTCPSender();
 
   RtcpMode Status() const;
@@ -141,9 +141,6 @@
   void SetVideoBitrateAllocation(const VideoBitrateAllocation& bitrate);
   bool SendFeedbackPacket(const rtcp::TransportFeedback& packet);
 
-  int64_t RtcpAudioReportInverval() const;
-  int64_t RtcpVideoReportInverval() const;
-
  private:
   class RtcpContext;
 
@@ -190,7 +187,7 @@
   RtcEventLog* const event_log_;
   Transport* const transport_;
 
-  const RtcpIntervalConfig interval_config_;
+  const int report_interval_ms_;
 
   rtc::CriticalSection critical_section_rtcp_sender_;
   bool sending_ RTC_GUARDED_BY(critical_section_rtcp_sender_);
diff --git a/modules/rtp_rtcp/source/rtcp_sender_unittest.cc b/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
index 3e37cc6..e1a436b 100644
--- a/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
+++ b/modules/rtp_rtcp/source/rtcp_sender_unittest.cc
@@ -45,7 +45,7 @@
   RtcpPacketTypeCounter counter_;
 };
 
-class TestTransport : public Transport, public RtpData {
+class TestTransport : public Transport {
  public:
   TestTransport() {}
 
@@ -58,11 +58,6 @@
     parser_.Parse(data, len);
     return true;
   }
-  int OnReceivedPayloadData(const uint8_t* payload_data,
-                            size_t payload_size,
-                            const WebRtcRTPHeader* rtp_header) override {
-    return 0;
-  }
   test::RtcpPacketParser parser_;
 };
 
@@ -84,11 +79,12 @@
     configuration.clock = &clock_;
     configuration.outgoing_transport = &test_transport_;
     configuration.retransmission_rate_limiter = &retransmission_rate_limiter_;
+    configuration.rtcp_report_interval_ms = 1000;
 
     rtp_rtcp_impl_.reset(new ModuleRtpRtcpImpl(configuration));
     rtcp_sender_.reset(new RTCPSender(false, &clock_, receive_statistics_.get(),
                                       nullptr, nullptr, &test_transport_,
-                                      configuration.rtcp_interval_config));
+                                      configuration.rtcp_report_interval_ms));
     rtcp_sender_->SetSSRC(kSenderSsrc);
     rtcp_sender_->SetRemoteSSRC(kRemoteSsrc);
     rtcp_sender_->SetTimestampOffset(kStartRtpTimestamp);
@@ -191,8 +187,7 @@
 
 TEST_F(RtcpSenderTest, DoNotSendSrBeforeRtp) {
   rtcp_sender_.reset(new RTCPSender(false, &clock_, receive_statistics_.get(),
-                                    nullptr, nullptr, &test_transport_,
-                                    RtcpIntervalConfig{}));
+                                    nullptr, nullptr, &test_transport_, 1000));
   rtcp_sender_->SetSSRC(kSenderSsrc);
   rtcp_sender_->SetRemoteSSRC(kRemoteSsrc);
   rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
@@ -210,8 +205,7 @@
 
 TEST_F(RtcpSenderTest, DoNotSendCompundBeforeRtp) {
   rtcp_sender_.reset(new RTCPSender(false, &clock_, receive_statistics_.get(),
-                                    nullptr, nullptr, &test_transport_,
-                                    RtcpIntervalConfig{}));
+                                    nullptr, nullptr, &test_transport_, 1000));
   rtcp_sender_->SetSSRC(kSenderSsrc);
   rtcp_sender_->SetRemoteSSRC(kRemoteSsrc);
   rtcp_sender_->SetRTCPStatus(RtcpMode::kCompound);
@@ -512,7 +506,7 @@
   RtcpPacketTypeCounterObserverImpl observer;
   rtcp_sender_.reset(new RTCPSender(false, &clock_, receive_statistics_.get(),
                                     &observer, nullptr, &test_transport_,
-                                    RtcpIntervalConfig{}));
+                                    1000));
   rtcp_sender_->SetRemoteSSRC(kRemoteSsrc);
   rtcp_sender_->SetRTCPStatus(RtcpMode::kReducedSize);
   EXPECT_EQ(0, rtcp_sender_->SendRTCP(feedback_state(), kRtcpPli));
@@ -634,8 +628,7 @@
 
   // Re-configure rtcp_sender_ with mock_transport_
   rtcp_sender_.reset(new RTCPSender(false, &clock_, receive_statistics_.get(),
-                                    nullptr, nullptr, &mock_transport,
-                                    RtcpIntervalConfig{}));
+                                    nullptr, nullptr, &mock_transport, 1000));
   rtcp_sender_->SetSSRC(kSenderSsrc);
   rtcp_sender_->SetRemoteSSRC(kRemoteSsrc);
   rtcp_sender_->SetTimestampOffset(kStartRtpTimestamp);
diff --git a/modules/rtp_rtcp/source/rtcp_transceiver_config.h b/modules/rtp_rtcp/source/rtcp_transceiver_config.h
index 0da18e2..01330d0 100644
--- a/modules/rtp_rtcp/source/rtcp_transceiver_config.h
+++ b/modules/rtp_rtcp/source/rtcp_transceiver_config.h
@@ -97,11 +97,6 @@
   // Estimate RTT as non-sender as described in
   // https://tools.ietf.org/html/rfc3611#section-4.4 and #section-4.5
   bool non_sender_rtt_measurement = false;
-  // Copies LastSR/DelaySinceLastSR for previous report block to avoid
-  // triggering bug in older version of RtcpReceiver.
-  // TODO(bugs.webrtc.org/8805): Change to false by default then remove when
-  // all major webrtc clients updated with the fix in RtcpReceiver.
-  bool avoid_zero_last_sr_in_last_report_block = true;
 };
 
 }  // namespace webrtc
diff --git a/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc b/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc
index 2c6c3ac..97c2ac0 100644
--- a/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc
+++ b/modules/rtp_rtcp/source/rtcp_transceiver_impl.cc
@@ -421,12 +421,6 @@
     auto it = remote_senders_.find(report_block.source_ssrc());
     if (it == remote_senders_.end() ||
         !it->second.last_received_sender_report) {
-      if (config_.avoid_zero_last_sr_in_last_report_block && last_sr != 0) {
-        // Simulate behaviour of the RtcpSender to avoid hitting bug in
-        // RtcpReceiver.
-        report_block.SetLastSr(last_sr);
-        report_block.SetDelayLastSr(last_delay);
-      }
       continue;
     }
     const SenderReportTimes& last_sender_report =
diff --git a/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc b/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc
index 1e684a3..e86d67f 100644
--- a/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc
+++ b/modules/rtp_rtcp/source/rtcp_transceiver_impl_unittest.cc
@@ -670,7 +670,6 @@
 
   RtcpTransceiverConfig config;
   config.schedule_periodic_compound_packets = false;
-  config.avoid_zero_last_sr_in_last_report_block = false;
   RtcpPacketParser rtcp_parser;
   RtcpParserTransport transport(&rtcp_parser);
   config.outgoing_transport = &transport;
@@ -702,52 +701,6 @@
   EXPECT_EQ(report_blocks[1].last_sr(), 0u);
 }
 
-TEST(RtcpTransceiverImplTest, AvoidLastReportBlockToHaveZeroLastSrField) {
-  const uint32_t kRemoteSsrc1 = 54321;
-  const uint32_t kRemoteSsrc2 = 54323;
-  MockReceiveStatisticsProvider receive_statistics;
-  std::vector<ReportBlock> statistics_report_blocks(2);
-  statistics_report_blocks[0].SetMediaSsrc(kRemoteSsrc1);
-  statistics_report_blocks[1].SetMediaSsrc(kRemoteSsrc2);
-  ON_CALL(receive_statistics, RtcpReportBlocks(_))
-      .WillByDefault(Return(statistics_report_blocks));
-
-  RtcpTransceiverConfig config;
-  config.schedule_periodic_compound_packets = false;
-  config.avoid_zero_last_sr_in_last_report_block = true;
-  RtcpPacketParser rtcp_parser;
-  RtcpParserTransport transport(&rtcp_parser);
-  config.outgoing_transport = &transport;
-  config.receive_statistics = &receive_statistics;
-  RtcpTransceiverImpl rtcp_transceiver(config);
-
-  const NtpTime kRemoteNtp(0x9876543211);
-  // Receive SenderReport for RemoteSsrc1, but no report for RemoteSsrc2.
-  SenderReport sr;
-  sr.SetSenderSsrc(kRemoteSsrc1);
-  sr.SetNtp(kRemoteNtp);
-  auto raw_packet = sr.Build();
-  rtcp_transceiver.ReceivePacket(raw_packet, /*now_us=*/0);
-
-  // Trigger sending ReceiverReport.
-  rtcp_transceiver.SendCompoundPacket();
-
-  EXPECT_GT(rtcp_parser.receiver_report()->num_packets(), 0);
-  const auto& report_blocks = rtcp_parser.receiver_report()->report_blocks();
-  ASSERT_EQ(report_blocks.size(), 2u);
-  // RtcpTransceiverImpl doesn't guarantee order of the report blocks
-  // match result of ReceiveStatisticsProvider::RtcpReportBlocks callback,
-  // but for simplicity of the test asume it is the same.
-  ASSERT_EQ(report_blocks[0].source_ssrc(), kRemoteSsrc1);
-  EXPECT_NE(report_blocks[0].last_sr(), 0u);
-
-  ASSERT_EQ(report_blocks[1].source_ssrc(), kRemoteSsrc2);
-  // No Sender Report for kRemoteSsrc2, use same LastSR as for kRemoteSsrc1
-  EXPECT_EQ(report_blocks[1].last_sr(), report_blocks[0].last_sr());
-  EXPECT_EQ(report_blocks[1].delay_since_last_sr(),
-            report_blocks[0].delay_since_last_sr());
-}
-
 TEST(RtcpTransceiverImplTest,
      WhenSendsReceiverReportCalculatesDelaySinceLastSenderReport) {
   const uint32_t kRemoteSsrc1 = 4321;
diff --git a/modules/rtp_rtcp/source/rtp_header_extension_map.cc b/modules/rtp_rtcp/source/rtp_header_extension_map.cc
index dde25e3..8e0a484 100644
--- a/modules/rtp_rtcp/source/rtp_header_extension_map.cc
+++ b/modules/rtp_rtcp/source/rtp_header_extension_map.cc
@@ -43,6 +43,7 @@
     CreateExtensionInfo<RepairedRtpStreamId>(),
     CreateExtensionInfo<RtpMid>(),
     CreateExtensionInfo<RtpGenericFrameDescriptorExtension>(),
+    CreateExtensionInfo<ColorSpaceExtension>(),
 };
 
 // Because of kRtpExtensionNone, NumberOfExtension is 1 bigger than the actual
diff --git a/modules/rtp_rtcp/source/rtp_header_extensions.cc b/modules/rtp_rtcp/source/rtp_header_extensions.cc
index 082e0e0..92694cd 100644
--- a/modules/rtp_rtcp/source/rtp_header_extensions.cc
+++ b/modules/rtp_rtcp/source/rtp_header_extensions.cc
@@ -11,6 +11,7 @@
 #include "modules/rtp_rtcp/source/rtp_header_extensions.h"
 
 #include <string.h>
+#include <cmath>
 
 #include "modules/rtp_rtcp/include/rtp_cvo.h"
 #include "modules/rtp_rtcp/source/byte_io.h"
@@ -433,6 +434,172 @@
   return true;
 }
 
+// Color space including HDR metadata as an optional field.
+//
+// RTP header extension to carry HDR metadata.
+// Float values are upscaled by a static factor and transmitted as integers.
+//
+// Data layout with HDR metadata
+//    0                   1                   2                   3
+//    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2
+//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//   |       ID      |   length=30    |   Primaries   |    Transfer    |
+//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//   |     Matrix    |      Range     |                 luminance_max  |
+//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//   |               |                  luminance_min                  |
+//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//   |              mastering_metadata.primary_r.x and .y              |
+//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//   |              mastering_metadata.primary_g.x and .y              |
+//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//   |              mastering_metadata.primary_b.x and .y              |
+//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//   |                mastering_metadata.white.x and .y                |
+//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//   |     max_content_light_level    | max_frame_average_light_level  |
+//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// Data layout without HDR metadata
+//    0                   1                   2                   3
+//    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2
+//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//   |       ID      |    length=4    |   Primaries   |    Transfer    |
+//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//   |     Matrix    |      Range     |
+//   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+
+constexpr RTPExtensionType ColorSpaceExtension::kId;
+constexpr uint8_t ColorSpaceExtension::kValueSizeBytes;
+constexpr const char ColorSpaceExtension::kUri[];
+
+bool ColorSpaceExtension::Parse(rtc::ArrayView<const uint8_t> data,
+                                ColorSpace* color_space) {
+  RTC_DCHECK(color_space);
+  if (data.size() != kValueSizeBytes &&
+      data.size() != kValueSizeBytesWithoutHdrMetadata)
+    return false;
+
+  size_t offset = 0;
+  // Read color space information.
+  if (!color_space->set_primaries_from_uint8(data.data()[offset++]))
+    return false;
+  if (!color_space->set_transfer_from_uint8(data.data()[offset++]))
+    return false;
+  if (!color_space->set_matrix_from_uint8(data.data()[offset++]))
+    return false;
+  if (!color_space->set_range_from_uint8(data.data()[offset++]))
+    return false;
+
+  // Read HDR metadata if it exists, otherwise clear it.
+  if (data.size() == kValueSizeBytesWithoutHdrMetadata) {
+    color_space->set_hdr_metadata(nullptr);
+  } else {
+    HdrMetadata hdr_metadata;
+    offset += ParseLuminance(data.data() + offset,
+                             &hdr_metadata.mastering_metadata.luminance_max,
+                             kLuminanceMaxDenominator);
+    offset += ParseLuminance(data.data() + offset,
+                             &hdr_metadata.mastering_metadata.luminance_min,
+                             kLuminanceMinDenominator);
+    offset += ParseChromaticity(data.data() + offset,
+                                &hdr_metadata.mastering_metadata.primary_r);
+    offset += ParseChromaticity(data.data() + offset,
+                                &hdr_metadata.mastering_metadata.primary_g);
+    offset += ParseChromaticity(data.data() + offset,
+                                &hdr_metadata.mastering_metadata.primary_b);
+    offset += ParseChromaticity(data.data() + offset,
+                                &hdr_metadata.mastering_metadata.white_point);
+    hdr_metadata.max_content_light_level =
+        ByteReader<uint16_t>::ReadBigEndian(data.data() + offset);
+    offset += 2;
+    hdr_metadata.max_frame_average_light_level =
+        ByteReader<uint16_t>::ReadBigEndian(data.data() + offset);
+    offset += 2;
+    color_space->set_hdr_metadata(&hdr_metadata);
+  }
+  RTC_DCHECK_EQ(ValueSize(*color_space), offset);
+  return true;
+}
+
+bool ColorSpaceExtension::Write(rtc::ArrayView<uint8_t> data,
+                                const ColorSpace& color_space) {
+  RTC_DCHECK(data.size() >= ValueSize(color_space));
+  size_t offset = 0;
+  // Write color space information.
+  data.data()[offset++] = static_cast<uint8_t>(color_space.primaries());
+  data.data()[offset++] = static_cast<uint8_t>(color_space.transfer());
+  data.data()[offset++] = static_cast<uint8_t>(color_space.matrix());
+  data.data()[offset++] = static_cast<uint8_t>(color_space.range());
+
+  // Write HDR metadata if it exists.
+  if (color_space.hdr_metadata()) {
+    const HdrMetadata& hdr_metadata = *color_space.hdr_metadata();
+    offset += WriteLuminance(data.data() + offset,
+                             hdr_metadata.mastering_metadata.luminance_max,
+                             kLuminanceMaxDenominator);
+    offset += WriteLuminance(data.data() + offset,
+                             hdr_metadata.mastering_metadata.luminance_min,
+                             kLuminanceMinDenominator);
+    offset += WriteChromaticity(data.data() + offset,
+                                hdr_metadata.mastering_metadata.primary_r);
+    offset += WriteChromaticity(data.data() + offset,
+                                hdr_metadata.mastering_metadata.primary_g);
+    offset += WriteChromaticity(data.data() + offset,
+                                hdr_metadata.mastering_metadata.primary_b);
+    offset += WriteChromaticity(data.data() + offset,
+                                hdr_metadata.mastering_metadata.white_point);
+
+    ByteWriter<uint16_t>::WriteBigEndian(data.data() + offset,
+                                         hdr_metadata.max_content_light_level);
+    offset += 2;
+    ByteWriter<uint16_t>::WriteBigEndian(
+        data.data() + offset, hdr_metadata.max_frame_average_light_level);
+    offset += 2;
+  }
+  RTC_DCHECK_EQ(ValueSize(color_space), offset);
+  return true;
+}
+
+size_t ColorSpaceExtension::ParseChromaticity(
+    const uint8_t* data,
+    HdrMasteringMetadata::Chromaticity* p) {
+  uint16_t chromaticity_x_scaled = ByteReader<uint16_t>::ReadBigEndian(data);
+  uint16_t chromaticity_y_scaled =
+      ByteReader<uint16_t>::ReadBigEndian(data + 2);
+  p->x = static_cast<float>(chromaticity_x_scaled) / kChromaticityDenominator;
+  p->y = static_cast<float>(chromaticity_y_scaled) / kChromaticityDenominator;
+  return 4;  // Return number of bytes read.
+}
+
+size_t ColorSpaceExtension::ParseLuminance(const uint8_t* data,
+                                           float* f,
+                                           int denominator) {
+  uint32_t luminance_scaled = ByteReader<uint32_t, 3>::ReadBigEndian(data);
+  *f = static_cast<float>(luminance_scaled) / denominator;
+  return 3;  // Return number of bytes read.
+}
+
+size_t ColorSpaceExtension::WriteChromaticity(
+    uint8_t* data,
+    const HdrMasteringMetadata::Chromaticity& p) {
+  RTC_DCHECK_GE(p.x, 0.0f);
+  RTC_DCHECK_GE(p.y, 0.0f);
+  ByteWriter<uint16_t>::WriteBigEndian(
+      data, std::round(p.x * kChromaticityDenominator));
+  ByteWriter<uint16_t>::WriteBigEndian(
+      data + 2, std::round(p.y * kChromaticityDenominator));
+  return 4;  // Return number of bytes written.
+}
+
+size_t ColorSpaceExtension::WriteLuminance(uint8_t* data,
+                                           float f,
+                                           int denominator) {
+  RTC_DCHECK_GE(f, 0.0f);
+  ByteWriter<uint32_t, 3>::WriteBigEndian(data, std::round(f * denominator));
+  return 3;  // Return number of bytes written.
+}
+
 bool BaseRtpStringExtension::Parse(rtc::ArrayView<const uint8_t> data,
                                    StringRtpHeaderExtension* str) {
   if (data.empty() || data[0] == 0)  // Valid string extension can't be empty.
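
The new ColorSpaceExtension serializer above transmits the HDR floats as fixed-point integers: chromaticity coordinates and luminance_min are scaled by 10000 (0.0001 resolution), luminance_max by 100 (0.01 resolution). A worked example using the values from the unit test further down:

  // primary_r.x   = 0.3003  -> round(0.3003 * 10000) = 3003    (16 bits)
  // luminance_max = 2000.0  -> round(2000.0 * 100)   = 200000  (24 bits)
  // luminance_min = 2.0001  -> round(2.0001 * 10000) = 20001   (24 bits)
  // The receiver divides by the same denominators, so values that are not a
  // multiple of the resolution are quantized on the way through.
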
diff --git a/modules/rtp_rtcp/source/rtp_header_extensions.h b/modules/rtp_rtcp/source/rtp_header_extensions.h
index 808356a..42a6216 100644
--- a/modules/rtp_rtcp/source/rtp_header_extensions.h
+++ b/modules/rtp_rtcp/source/rtp_header_extensions.h
@@ -16,6 +16,7 @@
 
 #include "api/array_view.h"
 #include "api/rtp_headers.h"
+#include "api/video/color_space.h"
 #include "api/video/video_content_type.h"
 #include "api/video/video_frame_marking.h"
 #include "api/video/video_rotation.h"
@@ -181,6 +182,36 @@
   static bool IsScalable(uint8_t temporal_id, uint8_t layer_id);
 };
 
+class ColorSpaceExtension {
+ public:
+  using value_type = ColorSpace;
+  static constexpr RTPExtensionType kId = kRtpExtensionColorSpace;
+  static constexpr uint8_t kValueSizeBytes = 30;
+  static constexpr uint8_t kValueSizeBytesWithoutHdrMetadata = 4;
+  // TODO(webrtc:8651): Change to a valid uri.
+  static constexpr const char kUri[] = "rtp-colorspace-uri-placeholder";
+
+  static bool Parse(rtc::ArrayView<const uint8_t> data,
+                    ColorSpace* color_space);
+  static size_t ValueSize(const ColorSpace& color_space) {
+    return color_space.hdr_metadata() ? kValueSizeBytes
+                                      : kValueSizeBytesWithoutHdrMetadata;
+  }
+  static bool Write(rtc::ArrayView<uint8_t> data,
+                    const ColorSpace& color_space);
+
+ private:
+  static constexpr int kChromaticityDenominator = 10000;  // 0.0001 resolution.
+  static constexpr int kLuminanceMaxDenominator = 100;    // 0.01 resolution.
+  static constexpr int kLuminanceMinDenominator = 10000;  // 0.0001 resolution.
+  static size_t ParseChromaticity(const uint8_t* data,
+                                  HdrMasteringMetadata::Chromaticity* p);
+  static size_t ParseLuminance(const uint8_t* data, float* f, int denominator);
+  static size_t WriteChromaticity(uint8_t* data,
+                                  const HdrMasteringMetadata::Chromaticity& p);
+  static size_t WriteLuminance(uint8_t* data, float f, int denominator);
+};
+
 // Base extension class for RTP header extensions which are strings.
 // Subclasses must defined kId and kUri static constexpr members.
 class BaseRtpStringExtension {
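
The two size constants above follow directly from the wire layout documented in the .cc file; a quick breakdown matching ValueSize():

  // primaries + transfer + matrix + range           : 4 * 1 byte  =  4
  // luminance_max + luminance_min                   : 2 * 3 bytes =  6
  // primary_r/g/b + white_point (x and y, 2 bytes)  : 4 * 4 bytes = 16
  // max_content_light_level + max_frame_average     : 2 * 2 bytes =  4
  // total with HDR metadata (kValueSizeBytes)                     = 30
  // total without HDR metadata                                    =  4
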
diff --git a/modules/rtp_rtcp/source/rtp_header_parser.cc b/modules/rtp_rtcp/source/rtp_header_parser.cc
index bc05033..6481a40 100644
--- a/modules/rtp_rtcp/source/rtp_header_parser.cc
+++ b/modules/rtp_rtcp/source/rtp_header_parser.cc
@@ -28,8 +28,10 @@
              RTPHeader* header) const override;
 
   bool RegisterRtpHeaderExtension(RTPExtensionType type, uint8_t id) override;
+  bool RegisterRtpHeaderExtension(RtpExtension extension) override;
 
   bool DeregisterRtpHeaderExtension(RTPExtensionType type) override;
+  bool DeregisterRtpHeaderExtension(RtpExtension extension) override;
 
  private:
   rtc::CriticalSection critical_section_;
@@ -66,6 +68,10 @@
   }
   return true;
 }
+bool RtpHeaderParserImpl::RegisterRtpHeaderExtension(RtpExtension extension) {
+  rtc::CritScope cs(&critical_section_);
+  return rtp_header_extension_map_.RegisterByUri(extension.id, extension.uri);
+}
 
 bool RtpHeaderParserImpl::RegisterRtpHeaderExtension(RTPExtensionType type,
                                                      uint8_t id) {
@@ -73,6 +79,12 @@
   return rtp_header_extension_map_.RegisterByType(id, type);
 }
 
+bool RtpHeaderParserImpl::DeregisterRtpHeaderExtension(RtpExtension extension) {
+  rtc::CritScope cs(&critical_section_);
+  return rtp_header_extension_map_.Deregister(
+      rtp_header_extension_map_.GetType(extension.id));
+}
+
 bool RtpHeaderParserImpl::DeregisterRtpHeaderExtension(RTPExtensionType type) {
   rtc::CritScope cs(&critical_section_);
   return rtp_header_extension_map_.Deregister(type) == 0;
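
RtpHeaderParser gains URI-based (de)registration alongside the existing type-based overloads; the new methods read extension.id and extension.uri and go through the extension map's RegisterByUri()/GetType(). A hedged usage sketch; the URI and id values are illustrative and assume webrtc::RtpExtension exposes public uri and id members:

  // Sketch only: register an extension by URI, later deregister by its id.
  void RegisterToffset(webrtc::RtpHeaderParser* parser) {
    webrtc::RtpExtension extension;
    extension.uri = "urn:ietf:params:rtp-hdrext:toffset";  // example URI
    extension.id = 5;                                      // example id
    parser->RegisterRtpHeaderExtension(extension);
    // Deregistration resolves the extension type from the registered id.
    parser->DeregisterRtpHeaderExtension(extension);
  }
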
diff --git a/modules/rtp_rtcp/source/rtp_packet_received.cc b/modules/rtp_rtcp/source/rtp_packet_received.cc
index 93c0a1e..f80fad6 100644
--- a/modules/rtp_rtcp/source/rtp_packet_received.cc
+++ b/modules/rtp_rtcp/source/rtp_packet_received.cc
@@ -69,6 +69,7 @@
   GetExtension<RepairedRtpStreamId>(&header->extension.repaired_stream_id);
   GetExtension<RtpMid>(&header->extension.mid);
   GetExtension<PlayoutDelayLimits>(&header->extension.playout_delay);
+  header->extension.color_space = GetExtension<ColorSpaceExtension>();
 }
 
 }  // namespace webrtc
diff --git a/modules/rtp_rtcp/source/rtp_packet_unittest.cc b/modules/rtp_rtcp/source/rtp_packet_unittest.cc
index b485df6..b1c0e42 100644
--- a/modules/rtp_rtcp/source/rtp_packet_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_packet_unittest.cc
@@ -185,6 +185,52 @@
     0x04, 0x00, 0x00, 0x00,
     0x00, 0x00, 0x00, 0x00};
 // clang-format on
+
+HdrMetadata CreateTestHdrMetadata() {
+  // Random but reasonable HDR metadata.
+  HdrMetadata hdr_metadata;
+  hdr_metadata.mastering_metadata.luminance_max = 2000.0;
+  hdr_metadata.mastering_metadata.luminance_min = 2.0001;
+  hdr_metadata.mastering_metadata.primary_r.x = 0.3003;
+  hdr_metadata.mastering_metadata.primary_r.y = 0.4004;
+  hdr_metadata.mastering_metadata.primary_g.x = 0.3201;
+  hdr_metadata.mastering_metadata.primary_g.y = 0.4604;
+  hdr_metadata.mastering_metadata.primary_b.x = 0.3409;
+  hdr_metadata.mastering_metadata.primary_b.y = 0.4907;
+  hdr_metadata.mastering_metadata.white_point.x = 0.4103;
+  hdr_metadata.mastering_metadata.white_point.y = 0.4806;
+  hdr_metadata.max_content_light_level = 2345;
+  hdr_metadata.max_frame_average_light_level = 1789;
+  return hdr_metadata;
+}
+
+ColorSpace CreateTestColorSpace(bool with_hdr_metadata) {
+  ColorSpace color_space(
+      ColorSpace::PrimaryID::kBT709, ColorSpace::TransferID::kGAMMA22,
+      ColorSpace::MatrixID::kSMPTE2085, ColorSpace::RangeID::kFull);
+  if (with_hdr_metadata) {
+    HdrMetadata hdr_metadata = CreateTestHdrMetadata();
+    color_space.set_hdr_metadata(&hdr_metadata);
+  }
+  return color_space;
+}
+
+void TestCreateAndParseColorSpaceExtension(bool with_hdr_metadata) {
+  // Create packet with extension.
+  RtpPacket::ExtensionManager extensions(/*extmap-allow-mixed=*/true);
+  extensions.Register<ColorSpaceExtension>(1);
+  RtpPacket packet(&extensions);
+  const ColorSpace kColorSpace = CreateTestColorSpace(with_hdr_metadata);
+  EXPECT_TRUE(packet.SetExtension<ColorSpaceExtension>(kColorSpace));
+  packet.SetPayloadSize(42);
+
+  // Read packet with the extension.
+  RtpPacketReceived parsed(&extensions);
+  EXPECT_TRUE(parsed.Parse(packet.Buffer()));
+  ColorSpace parsed_color_space;
+  EXPECT_TRUE(parsed.GetExtension<ColorSpaceExtension>(&parsed_color_space));
+  EXPECT_EQ(kColorSpace, parsed_color_space);
+}
 }  // namespace
 
 TEST(RtpPacketTest, CreateMinimum) {
@@ -801,4 +847,12 @@
   EXPECT_EQ(receivied_timing.flags, 0);
 }
 
+TEST(RtpPacketTest, CreateAndParseColorSpaceExtension) {
+  TestCreateAndParseColorSpaceExtension(/*with_hdr_metadata=*/true);
+}
+
+TEST(RtpPacketTest, CreateAndParseColorSpaceExtensionWithoutHdrMetadata) {
+  TestCreateAndParseColorSpaceExtension(/*with_hdr_metadata=*/false);
+}
+
 }  // namespace webrtc
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
index 5726284..0d0ca96 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.cc
@@ -33,6 +33,8 @@
 const int64_t kRtpRtcpRttProcessTimeMs = 1000;
 const int64_t kRtpRtcpBitrateProcessTimeMs = 10;
 const int64_t kDefaultExpectedRetransmissionTimeMs = 125;
+constexpr int32_t kDefaultVideoReportInterval = 1000;
+constexpr int32_t kDefaultAudioReportInterval = 5000;
 }  // namespace
 
 RtpRtcp::Configuration::Configuration() = default;
@@ -64,7 +66,10 @@
                    configuration.rtcp_packet_type_counter_observer,
                    configuration.event_log,
                    configuration.outgoing_transport,
-                   configuration.rtcp_interval_config),
+                   configuration.rtcp_report_interval_ms > 0
+                       ? configuration.rtcp_report_interval_ms
+                       : (configuration.audio ? kDefaultAudioReportInterval
+                                              : kDefaultVideoReportInterval)),
       rtcp_receiver_(configuration.clock,
                      configuration.receiver_only,
                      configuration.rtcp_packet_type_counter_observer,
@@ -72,6 +77,10 @@
                      configuration.intra_frame_callback,
                      configuration.transport_feedback_callback,
                      configuration.bitrate_allocation_observer,
+                     configuration.rtcp_report_interval_ms > 0
+                         ? configuration.rtcp_report_interval_ms
+                         : (configuration.audio ? kDefaultAudioReportInterval
+                                                : kDefaultVideoReportInterval),
                      this),
       clock_(configuration.clock),
       audio_(configuration.audio),
@@ -179,10 +188,9 @@
 
     // Verify receiver reports are delivered and the reported sequence number
     // is increasing.
-    int64_t rtcp_interval = RtcpReportInterval();
-    if (rtcp_receiver_.RtcpRrTimeout(rtcp_interval)) {
+    if (rtcp_receiver_.RtcpRrTimeout()) {
       RTC_LOG_F(LS_WARNING) << "Timeout: No RTCP RR received.";
-    } else if (rtcp_receiver_.RtcpRrSequenceNumberTimeout(rtcp_interval)) {
+    } else if (rtcp_receiver_.RtcpRrSequenceNumberTimeout()) {
       RTC_LOG_F(LS_WARNING) << "Timeout: No increase in RTCP RR extended "
                                "highest sequence number.";
     }
@@ -859,13 +867,6 @@
   return rtcp_receiver_.BoundingSet(tmmbr_owner);
 }
 
-int64_t ModuleRtpRtcpImpl::RtcpReportInterval() {
-  if (audio_)
-    return rtcp_sender_.RtcpAudioReportInverval();
-  else
-    return rtcp_sender_.RtcpVideoReportInverval();
-}
-
 void ModuleRtpRtcpImpl::SetRtcpReceiverSsrcs(uint32_t main_ssrc) {
   std::set<uint32_t> ssrcs;
   ssrcs.insert(main_ssrc);
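
The constructor changes above resolve the RTCP report interval once: a positive configuration.rtcp_report_interval_ms wins, otherwise the defaults apply (5000 ms for audio, 1000 ms for video). A minimal configuration sketch mirroring the updated RtpRtcpImplTest; clock, transports and the other required fields are omitted:

  // Sketch only: explicitly configure a 3 s report interval for video.
  void ConfigureVideoRtcpInterval(webrtc::RtpRtcp::Configuration* config) {
    config->audio = false;                   // video module
    config->rtcp_report_interval_ms = 3000;  // overrides the 1000 ms default
  }
  // Leaving rtcp_report_interval_ms at 0 selects kDefaultAudioReportInterval
  // (5000 ms) for audio or kDefaultVideoReportInterval (1000 ms) for video.
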
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl.h b/modules/rtp_rtcp/source/rtp_rtcp_impl.h
index 37516de..8e9751d 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl.h
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl.h
@@ -326,7 +326,6 @@
  private:
   FRIEND_TEST_ALL_PREFIXES(RtpRtcpImplTest, Rtt);
   FRIEND_TEST_ALL_PREFIXES(RtpRtcpImplTest, RttForReceiverOnly);
-  int64_t RtcpReportInterval();
   void SetRtcpReceiverSsrcs(uint32_t main_ssrc);
 
   void set_rtt_ms(int64_t rtt_ms);
diff --git a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
index 5160a64..632a537 100644
--- a/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_rtcp_impl_unittest.cc
@@ -49,7 +49,7 @@
   int64_t rtt_ms_;
 };
 
-class SendTransport : public Transport, public RtpData {
+class SendTransport : public Transport {
  public:
   SendTransport()
       : receiver_(nullptr),
@@ -90,11 +90,6 @@
     ++rtcp_packets_sent_;
     return true;
   }
-  int32_t OnReceivedPayloadData(const uint8_t* payload_data,
-                                size_t payload_size,
-                                const WebRtcRTPHeader* rtp_header) override {
-    return 0;
-  }
   void SetKeepalivePayloadType(uint8_t payload_type) {
     keepalive_payload_type_ = payload_type;
   }
@@ -129,7 +124,7 @@
   std::unique_ptr<ModuleRtpRtcpImpl> impl_;
   uint32_t remote_ssrc_;
   RtpKeepAliveConfig keepalive_config_;
-  RtcpIntervalConfig rtcp_interval_config_;
+  int rtcp_report_interval_ms_ = 0;
 
   void SetRemoteSsrc(uint32_t ssrc) {
     remote_ssrc_ = ssrc;
@@ -164,8 +159,8 @@
     CreateModuleImpl();
     transport_.SetKeepalivePayloadType(config.payload_type);
   }
-  void SetRtcpIntervalConfigAndReset(const RtcpIntervalConfig& config) {
-    rtcp_interval_config_ = config;
+  void SetRtcpReportIntervalAndReset(int rtcp_report_interval_ms) {
+    rtcp_report_interval_ms_ = rtcp_report_interval_ms;
     CreateModuleImpl();
   }
 
@@ -179,7 +174,7 @@
     config.rtcp_packet_type_counter_observer = this;
     config.rtt_stats = &rtt_stats_;
     config.keepalive_config = keepalive_config_;
-    config.rtcp_interval_config = rtcp_interval_config_;
+    config.rtcp_report_interval_ms = rtcp_report_interval_ms_;
 
     impl_.reset(new ModuleRtpRtcpImpl(config));
     impl_->SetRTCPStatus(RtcpMode::kCompound);
@@ -648,11 +643,8 @@
 TEST_F(RtpRtcpImplTest, ConfigurableRtcpReportInterval) {
   const int kVideoReportInterval = 3000;
 
-  RtcpIntervalConfig config;
-  config.video_interval_ms = kVideoReportInterval;
-
   // Recreate sender impl with new configuration, and redo setup.
-  sender_.SetRtcpIntervalConfigAndReset(config);
+  sender_.SetRtcpReportIntervalAndReset(kVideoReportInterval);
   SetUp();
 
   SendFrame(&sender_, kBaseLayerTid);
diff --git a/modules/rtp_rtcp/source/rtp_sender.cc b/modules/rtp_rtcp/source/rtp_sender.cc
index 38d6030..ddf91f5 100644
--- a/modules/rtp_rtcp/source/rtp_sender.cc
+++ b/modules/rtp_rtcp/source/rtp_sender.cc
@@ -95,14 +95,6 @@
   }
   return "";
 }
-
-void CountPacket(RtpPacketCounter* counter, const RtpPacketToSend& packet) {
-  ++counter->packets;
-  counter->header_bytes += packet.headers_size();
-  counter->padding_bytes += packet.padding_size();
-  counter->payload_bytes += packet.payload_size();
-}
-
 }  // namespace
 
 RTPSender::RTPSender(
@@ -883,13 +875,13 @@
     counters->first_packet_time_ms = now_ms;
 
   if (IsFecPacket(packet))
-    CountPacket(&counters->fec, packet);
+    counters->fec.AddPacket(packet);
 
   if (is_retransmit) {
-    CountPacket(&counters->retransmitted, packet);
+    counters->retransmitted.AddPacket(packet);
     nack_bitrate_sent_.Update(packet.size(), now_ms);
   }
-  CountPacket(&counters->transmitted, packet);
+  counters->transmitted.AddPacket(packet);
 
   if (rtp_stats_callback_)
     rtp_stats_callback_->DataCountersUpdated(*counters, packet.Ssrc());
@@ -927,15 +919,6 @@
   RTC_DCHECK(packet);
   int64_t now_ms = clock_->TimeInMilliseconds();
 
-  // |capture_time_ms| <= 0 is considered invalid.
-  // TODO(holmer): This should be changed all over Video Engine so that negative
-  // time is consider invalid, while 0 is considered a valid time.
-  if (packet->capture_time_ms() > 0) {
-    packet->SetExtension<TransmissionOffset>(
-        kTimestampTicksPerMs * (now_ms - packet->capture_time_ms()));
-  }
-  packet->SetExtension<AbsoluteSendTime>(AbsoluteSendTime::MsTo24Bits(now_ms));
-
   if (video_) {
     BWE_TEST_LOGGING_PLOT_WITH_SSRC(1, "VideoTotBitrate_kbps", now_ms,
                                     ActualSendBitrateKbit(), packet->Ssrc());
@@ -979,6 +962,20 @@
   PacketOptions options;
   options.is_retransmit = false;
 
+  // |capture_time_ms| <= 0 is considered invalid.
+  // TODO(holmer): This should be changed all over Video Engine so that negative
+  // time is considered invalid, while 0 is considered a valid time.
+  if (packet->capture_time_ms() > 0) {
+    packet->SetExtension<TransmissionOffset>(
+        kTimestampTicksPerMs * (now_ms - packet->capture_time_ms()));
+
+    if (populate_network2_timestamp_ &&
+        packet->HasExtension<VideoTimingExtension>()) {
+      packet->set_network2_time_ms(now_ms);
+    }
+  }
+  packet->SetExtension<AbsoluteSendTime>(AbsoluteSendTime::MsTo24Bits(now_ms));
+
   bool has_transport_seq_num;
   {
     rtc::CritScope lock(&send_critsect_);
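
The local CountPacket() helper above is dropped in favor of RtpPacketCounter::AddPacket(), whose definition is not part of this hunk. Based on the removed helper, the member is expected to update the same four fields; a hedged sketch of the equivalent logic:

  // Sketch of the bookkeeping AddPacket() presumably performs; the real
  // member is defined elsewhere in this sync.
  void AddPacketEquivalent(RtpPacketCounter* counter,
                           const RtpPacketToSend& packet) {
    ++counter->packets;
    counter->header_bytes += packet.headers_size();
    counter->padding_bytes += packet.padding_size();
    counter->payload_bytes += packet.payload_size();
  }
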
diff --git a/modules/rtp_rtcp/source/rtp_sender_unittest.cc b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
index f30b383..a687bcb 100644
--- a/modules/rtp_rtcp/source/rtp_sender_unittest.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_unittest.cc
@@ -690,8 +690,8 @@
   EXPECT_EQ(kStoredTimeInMs, video_timing.pacer_exit_delta_ms);
 }
 
-TEST_P(RtpSenderTest, WritesNetwork2ToTimingExtension) {
-  SetUpRtpSender(true, true);
+TEST_P(RtpSenderTest, WritesNetwork2ToTimingExtensionWithPacer) {
+  SetUpRtpSender(/*pacer=*/true, /*populate_network2=*/true);
   rtp_sender_->SetStorePacketsStatus(true, 10);
   EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
                    kRtpExtensionVideoTiming, kVideoTimingExtensionId));
@@ -729,6 +729,31 @@
   EXPECT_EQ(kPacerExitMs, video_timing.pacer_exit_delta_ms);
 }
 
+TEST_P(RtpSenderTest, WritesNetwork2ToTimingExtensionWithoutPacer) {
+  SetUpRtpSender(/*pacer=*/false, /*populate_network2=*/true);
+  EXPECT_EQ(0, rtp_sender_->RegisterRtpHeaderExtension(
+                   kRtpExtensionVideoTiming, kVideoTimingExtensionId));
+  auto packet = rtp_sender_->AllocatePacket();
+  packet->SetMarker(true);
+  packet->set_capture_time_ms(fake_clock_.TimeInMilliseconds());
+  const VideoSendTiming kVideoTiming = {0u, 0u, 0u, 0u, 0u, 0u, true};
+  packet->SetExtension<VideoTimingExtension>(kVideoTiming);
+  EXPECT_TRUE(rtp_sender_->AssignSequenceNumber(packet.get()));
+
+  const int kPropagateTimeMs = 10;
+  fake_clock_.AdvanceTimeMilliseconds(kPropagateTimeMs);
+
+  EXPECT_TRUE(rtp_sender_->SendToNetwork(std::move(packet),
+                                         kAllowRetransmission,
+                                         RtpPacketSender::kNormalPriority));
+
+  EXPECT_EQ(1, transport_.packets_sent());
+  absl::optional<VideoSendTiming> video_timing =
+      transport_.last_sent_packet().GetExtension<VideoTimingExtension>();
+  ASSERT_TRUE(video_timing);
+  EXPECT_EQ(kPropagateTimeMs, video_timing->network2_timestamp_delta_ms);
+}
+
 TEST_P(RtpSenderTest, TrafficSmoothingWithExtensions) {
   EXPECT_CALL(mock_paced_sender_, InsertPacket(RtpPacketSender::kNormalPriority,
                                                kSsrc, kSeqNum, _, _, _));
diff --git a/modules/rtp_rtcp/source/rtp_utility.cc b/modules/rtp_rtcp/source/rtp_utility.cc
index 53a006d..44c671f 100644
--- a/modules/rtp_rtcp/source/rtp_utility.cc
+++ b/modules/rtp_rtcp/source/rtp_utility.cc
@@ -507,6 +507,10 @@
           RTC_LOG(WARNING)
               << "RtpGenericFrameDescriptor unsupported by rtp header parser.";
           break;
+        case kRtpExtensionColorSpace:
+          RTC_LOG(WARNING)
+              << "RtpExtensionColorSpace unsupported by rtp header parser.";
+          break;
         case kRtpExtensionNone:
         case kRtpExtensionNumberOfExtensions: {
           RTC_NOTREACHED() << "Invalid extension type: " << type;
diff --git a/modules/rtp_rtcp/source/rtp_video_header.cc b/modules/rtp_rtcp/source/rtp_video_header.cc
index a3ee8ba..bb9413d 100644
--- a/modules/rtp_rtcp/source/rtp_video_header.cc
+++ b/modules/rtp_rtcp/source/rtp_video_header.cc
@@ -12,7 +12,7 @@
 
 namespace webrtc {
 
-RTPVideoHeader::RTPVideoHeader() : playout_delay(), video_timing() {}
+RTPVideoHeader::RTPVideoHeader() : video_timing() {}
 RTPVideoHeader::RTPVideoHeader(const RTPVideoHeader& other) = default;
 RTPVideoHeader::~RTPVideoHeader() = default;
 
diff --git a/modules/rtp_rtcp/source/rtp_video_header.h b/modules/rtp_rtcp/source/rtp_video_header.h
index 4426c41..1c75f53 100644
--- a/modules/rtp_rtcp/source/rtp_video_header.h
+++ b/modules/rtp_rtcp/source/rtp_video_header.h
@@ -15,6 +15,7 @@
 #include "absl/container/inlined_vector.h"
 #include "absl/types/optional.h"
 #include "absl/types/variant.h"
+#include "api/video/video_codec_type.h"
 #include "api/video/video_content_type.h"
 #include "api/video/video_frame_marking.h"
 #include "api/video/video_rotation.h"
@@ -59,7 +60,7 @@
   uint8_t simulcastIdx = 0;
   VideoCodecType codec = VideoCodecType::kVideoCodecGeneric;
 
-  PlayoutDelay playout_delay;
+  PlayoutDelay playout_delay = {-1, -1};
   VideoSendTiming video_timing;
   FrameMarking frame_marking;
   RTPVideoTypeHeader video_type_header;
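
The playout_delay default changes from a value-initialized struct (effectively {0, 0}) to {-1, -1}. Assuming the usual WebRTC convention where PlayoutDelay carries min_ms/max_ms and -1 marks "not specified", a sketch of how callers would distinguish an unset delay:

  // Sketch under the assumption above; PlayoutDelay is redeclared here only
  // to keep the snippet self-contained.
  struct PlayoutDelay { int min_ms; int max_ms; };

  bool PlayoutDelayIsSet(const PlayoutDelay& d) {
    return d.min_ms >= 0 && d.max_ms >= 0;  // {-1, -1} is the new "unset" default.
  }
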
diff --git a/modules/rtp_rtcp/source/time_util.h b/modules/rtp_rtcp/source/time_util.h
index 1e01c94..94b9143 100644
--- a/modules/rtp_rtcp/source/time_util.h
+++ b/modules/rtp_rtcp/source/time_util.h
@@ -31,12 +31,6 @@
 // rtc::TimeMicros()
 int64_t NtpOffsetMs();
 
-// Converts NTP timestamp to RTP timestamp.
-inline uint32_t NtpToRtp(NtpTime ntp, uint32_t freq) {
-  uint32_t tmp = (static_cast<uint64_t>(ntp.fractions()) * freq) >> 32;
-  return ntp.seconds() * freq + tmp;
-}
-
 // Helper function for compact ntp representation:
 // RFC 3550, Section 4. Time Format.
 // Wallclock time is represented using the timestamp format of
diff --git a/rtc_base/BUILD.gn b/rtc_base/BUILD.gn
index f970278..c88815f 100644
--- a/rtc_base/BUILD.gn
+++ b/rtc_base/BUILD.gn
@@ -78,7 +78,6 @@
     ":safe_compare",
     ":safe_minmax",
     ":type_traits",
-    "..:webrtc_common",
     "../api:array_view",
     "../system_wrappers:field_trial",
     "experiments:field_trial_parser",
@@ -207,6 +206,9 @@
   sources = [
     "scoped_ref_ptr.h",
   ]
+  deps = [
+    "../api:scoped_refptr",
+  ]
 }
 
 rtc_source_set("refcount") {
@@ -241,6 +243,7 @@
     ":rtc_base_approved",
     ":rtc_task_queue_libevent",
     ":rtc_task_queue_win",
+    ":rtc_task_queue_stdlib",
     ":sequenced_task_checker",
   ]
   sources = [
@@ -376,6 +379,9 @@
   sources = [
     "sanitizer.h",
   ]
+  deps = [
+    "//third_party/abseil-cpp/absl/meta:type_traits",
+  ]
 }
 
 rtc_source_set("safe_compare") {
@@ -602,6 +608,26 @@
   }
 }
 
+rtc_source_set("rtc_task_queue_stdlib") {
+  visibility = [ ":rtc_task_queue_impl" ]
+  sources = [
+    "task_queue_stdlib.cc",
+  ]
+  deps = [
+    ":checks",
+    ":criticalsection",
+    ":logging",
+    ":macromagic",
+    ":platform_thread",
+    ":ptr_util",
+    ":refcount",
+    ":rtc_event",
+    ":rtc_task_queue_api",
+    ":safe_conversions",
+    ":timeutils",
+  ]
+}
+
 rtc_source_set("rtc_task_queue_impl") {
   visibility = [ "*" ]
   if (rtc_enable_libevent) {
@@ -615,9 +641,15 @@
       ]
     }
     if (is_win) {
-      deps = [
-        ":rtc_task_queue_win",
-      ]
+      if (current_os == "winuwp") {
+        deps = [
+          ":rtc_task_queue_stdlib",
+        ]
+      } else {
+        deps = [
+          ":rtc_task_queue_win",
+        ]
+      }
     }
   }
 }
@@ -838,11 +870,13 @@
     ]
 
     if (is_win) {
-      sources += [
-        "win32socketinit.h",
-        "win32socketserver.cc",
-        "win32socketserver.h",
-      ]
+      sources += [ "win32socketinit.h" ]
+      if (current_os != "winuwp") {
+        sources += [
+          "win32socketserver.cc",
+          "win32socketserver.h",
+        ]
+      }
     }
   }  # !build_with_chromium
 
@@ -932,11 +966,41 @@
   ]
 }
 
+rtc_source_set("gunit_helpers") {
+  testonly = true
+  sources = [
+    "gunit.cc",
+    "gunit.h",
+  ]
+  deps = [
+    ":logging",
+    ":rtc_base",
+    ":rtc_base_tests_utils",
+    ":stringutils",
+    "../test:test_support",
+  ]
+}
+
+rtc_source_set("testclient") {
+  testonly = true
+  sources = [
+    "testclient.cc",
+    "testclient.h",
+  ]
+  deps = [
+    ":criticalsection",
+    ":gunit_helpers",
+    ":macromagic",
+    ":rtc_base",
+    ":rtc_base_tests_utils",
+    ":timeutils",
+    "//third_party/abseil-cpp/absl/memory:memory",
+  ]
+}
+
 rtc_source_set("rtc_base_tests_utils") {
   testonly = true
   sources = [
-    # Also use this as a convenient dumping ground for misc files that are
-    # included by multiple targets below.
     "cpu_time.cc",
     "cpu_time.h",
     "fake_mdns_responder.h",
@@ -947,8 +1011,6 @@
     "fakesslidentity.h",
     "firewallsocketserver.cc",
     "firewallsocketserver.h",
-    "gunit.cc",
-    "gunit.h",
     "memory_stream.cc",
     "memory_stream.h",
     "memory_usage.cc",
@@ -967,8 +1029,6 @@
     "socketstream.h",
     "testbase64.h",
     "testcertificateverifier.h",
-    "testclient.cc",
-    "testclient.h",
     "testechoserver.cc",
     "testechoserver.h",
     "testutils.cc",
@@ -979,16 +1039,10 @@
   deps = [
     ":checks",
     ":rtc_base",
-    ":stringutils",
     "../api/units:time_delta",
-    "../test:test_support",
-    "system:fallthrough",
     "third_party/sigslot",
     "//third_party/abseil-cpp/absl/memory",
   ]
-  public_deps = [
-    "//testing/gtest",
-  ]
 }
 
 rtc_source_set("rtc_task_queue_for_test") {
@@ -1013,6 +1067,7 @@
       "sigslot_unittest.cc",
     ]
     deps = [
+      ":gunit_helpers",
       ":rtc_base",
       ":rtc_base_tests_utils",
       "third_party/sigslot",
@@ -1025,6 +1080,7 @@
       "unittest_main.cc",
     ]
     deps = [
+      ":gunit_helpers",
       ":rtc_base",
       ":rtc_base_approved",
       ":rtc_base_tests_utils",
@@ -1053,9 +1109,11 @@
     ]
     deps = [
       ":checks",
+      ":gunit_helpers",
       ":rtc_base",
       ":rtc_base_tests_main",
       ":rtc_base_tests_utils",
+      ":testclient",
       "../system_wrappers:system_wrappers",
       "../test:fileutils",
       "../test:test_support",
@@ -1122,6 +1180,7 @@
     }
     deps = [
       ":checks",
+      ":gunit_helpers",
       ":rate_limiter",
       ":rtc_base",
       ":rtc_base_approved",
@@ -1132,6 +1191,7 @@
       ":safe_minmax",
       ":sanitizer",
       ":stringutils",
+      ":testclient",
       "../api:array_view",
       "../system_wrappers:system_wrappers",
       "../test:fileutils",
@@ -1151,6 +1211,7 @@
       "task_queue_unittest.cc",
     ]
     deps = [
+      ":gunit_helpers",
       ":rtc_base_approved",
       ":rtc_base_tests_main",
       ":rtc_base_tests_utils",
@@ -1184,6 +1245,7 @@
       "weak_ptr_unittest.cc",
     ]
     deps = [
+      ":gunit_helpers",
       ":rtc_base_approved",
       ":rtc_base_tests_main",
       ":rtc_base_tests_utils",
@@ -1219,6 +1281,7 @@
       "strings/json_unittest.cc",
     ]
     deps = [
+      ":gunit_helpers",
       ":rtc_base_tests_main",
       ":rtc_base_tests_utils",
       ":rtc_json",
@@ -1270,9 +1333,11 @@
     }
     deps = [
       ":checks",
+      ":gunit_helpers",
       ":rtc_base_tests_main",
       ":rtc_base_tests_utils",
       ":stringutils",
+      ":testclient",
       "../api:array_view",
       "../test:fileutils",
       "../test:test_support",
diff --git a/rtc_base/experiments/BUILD.gn b/rtc_base/experiments/BUILD.gn
index b2e1302..d36a43e 100644
--- a/rtc_base/experiments/BUILD.gn
+++ b/rtc_base/experiments/BUILD.gn
@@ -56,7 +56,6 @@
   ]
   deps = [
     "../:rtc_base_approved",
-    "../..:webrtc_common",
     "../../api/video_codecs:video_codecs_api",
     "../../system_wrappers:field_trial",
     "//third_party/abseil-cpp/absl/types:optional",
@@ -130,6 +129,7 @@
       ":normalize_simulcast_size_experiment",
       ":quality_scaling_experiment",
       ":rtt_mult_experiment",
+      "..:gunit_helpers",
       "../:rtc_base_tests_main",
       "../:rtc_base_tests_utils",
       "../../system_wrappers:field_trial",
diff --git a/rtc_base/experiments/OWNERS b/rtc_base/experiments/OWNERS
new file mode 100644
index 0000000..9c55872
--- /dev/null
+++ b/rtc_base/experiments/OWNERS
@@ -0,0 +1,8 @@
+per-file alr_experiment*=sprang@webrtc.org
+per-file congestion_controller_experiment*=srte@webrtc.org
+per-file cpu_speed_experiment*=asapersson@webrtc.org
+per-file field_trial*=srte@webrtc.org
+per-file jitter_upper_bound_experiment*=sprang@webrtc.org
+per-file normalize_simulcast_size_experiment*=asapersson@webrtc.org
+per-file quality_scaling_experiment*=asapersson@webrtc.org
+per-file rtt_mult_experiment*=mhoro@webrtc.org
diff --git a/rtc_base/experiments/field_trial_parser.cc b/rtc_base/experiments/field_trial_parser.cc
index a2d7f97..936487c 100644
--- a/rtc_base/experiments/field_trial_parser.cc
+++ b/rtc_base/experiments/field_trial_parser.cc
@@ -169,6 +169,9 @@
 template class FieldTrialParameter<int>;
 template class FieldTrialParameter<std::string>;
 
+template class FieldTrialConstrained<double>;
+template class FieldTrialConstrained<int>;
+
 template class FieldTrialOptional<double>;
 template class FieldTrialOptional<int>;
 template class FieldTrialOptional<bool>;
diff --git a/rtc_base/experiments/field_trial_parser.h b/rtc_base/experiments/field_trial_parser.h
index 22a8889..8bdd9b5 100644
--- a/rtc_base/experiments/field_trial_parser.h
+++ b/rtc_base/experiments/field_trial_parser.h
@@ -92,6 +92,43 @@
   T value_;
 };
 
+// This class uses the ParseTypedParameter function to implement a parameter
+// with an enforced default value and a range constraint. Values
+// outside the configured range will be ignored.
+template <typename T>
+class FieldTrialConstrained : public FieldTrialParameterInterface {
+ public:
+  FieldTrialConstrained(std::string key,
+                        T default_value,
+                        absl::optional<T> lower_limit,
+                        absl::optional<T> upper_limit)
+      : FieldTrialParameterInterface(key),
+        value_(default_value),
+        lower_limit_(lower_limit),
+        upper_limit_(upper_limit) {}
+  T Get() const { return value_; }
+  operator T() const { return Get(); }
+  const T* operator->() const { return &value_; }
+
+ protected:
+  bool Parse(absl::optional<std::string> str_value) override {
+    if (str_value) {
+      absl::optional<T> value = ParseTypedParameter<T>(*str_value);
+      if (value && (!lower_limit_ || *value >= *lower_limit_) &&
+          (!upper_limit_ || *value <= *upper_limit_)) {
+        value_ = *value;
+        return true;
+      }
+    }
+    return false;
+  }
+
+ private:
+  T value_;
+  absl::optional<T> lower_limit_;
+  absl::optional<T> upper_limit_;
+};
+
 class AbstractFieldTrialEnum : public FieldTrialParameterInterface {
  public:
   AbstractFieldTrialEnum(std::string key,
@@ -191,6 +228,9 @@
 // Using the given value as is.
 extern template class FieldTrialParameter<std::string>;
 
+extern template class FieldTrialConstrained<double>;
+extern template class FieldTrialConstrained<int>;
+
 extern template class FieldTrialOptional<double>;
 extern template class FieldTrialOptional<int>;
 extern template class FieldTrialOptional<bool>;
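
FieldTrialConstrained keeps its default whenever the parsed value is missing or falls outside the optional [lower, upper] limits. A minimal usage sketch (the key and trial string are made up; the unit tests below exercise the same behavior):

  // Made-up key and trial string, for illustration only.
  FieldTrialConstrained<int> max_items("max_items", /*default_value=*/10,
                                       /*lower_limit=*/1, /*upper_limit=*/100);
  ParseFieldTrial({&max_items}, "max_items:500");
  // 500 exceeds the upper limit, so the default survives:
  // max_items.Get() == 10.
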
diff --git a/rtc_base/experiments/field_trial_parser_unittest.cc b/rtc_base/experiments/field_trial_parser_unittest.cc
index de977ec..0d067f5 100644
--- a/rtc_base/experiments/field_trial_parser_unittest.cc
+++ b/rtc_base/experiments/field_trial_parser_unittest.cc
@@ -99,6 +99,19 @@
   EXPECT_EQ(exp.ping.Get(), false);
   EXPECT_EQ(exp.hash.Get(), "a80");
 }
+TEST(FieldTrialParserTest, IgnoresOutOfRange) {
+  FieldTrialConstrained<double> low("low", 10, absl::nullopt, 100);
+  FieldTrialConstrained<double> high("high", 10, 5, absl::nullopt);
+  ParseFieldTrial({&low, &high}, "low:1000,high:0");
+  EXPECT_EQ(low.Get(), 10);
+  EXPECT_EQ(high.Get(), 10);
+  ParseFieldTrial({&low, &high}, "low:inf,high:nan");
+  EXPECT_EQ(low.Get(), 10);
+  EXPECT_EQ(high.Get(), 10);
+  ParseFieldTrial({&low, &high}, "low:20,high:20");
+  EXPECT_EQ(low.Get(), 20);
+  EXPECT_EQ(high.Get(), 20);
+}
 TEST(FieldTrialParserTest, ParsesOptionalParameters) {
   FieldTrialOptional<int> max_count("c", absl::nullopt);
   ParseFieldTrial({&max_count}, "");
diff --git a/rtc_base/experiments/field_trial_units.cc b/rtc_base/experiments/field_trial_units.cc
index 8c201a6..5311a3a 100644
--- a/rtc_base/experiments/field_trial_units.cc
+++ b/rtc_base/experiments/field_trial_units.cc
@@ -87,6 +87,10 @@
 template class FieldTrialParameter<DataSize>;
 template class FieldTrialParameter<TimeDelta>;
 
+template class FieldTrialConstrained<DataRate>;
+template class FieldTrialConstrained<DataSize>;
+template class FieldTrialConstrained<TimeDelta>;
+
 template class FieldTrialOptional<DataRate>;
 template class FieldTrialOptional<DataSize>;
 template class FieldTrialOptional<TimeDelta>;
diff --git a/rtc_base/experiments/field_trial_units.h b/rtc_base/experiments/field_trial_units.h
index 932c5cb..af88f4a 100644
--- a/rtc_base/experiments/field_trial_units.h
+++ b/rtc_base/experiments/field_trial_units.h
@@ -21,6 +21,10 @@
 extern template class FieldTrialParameter<DataSize>;
 extern template class FieldTrialParameter<TimeDelta>;
 
+extern template class FieldTrialConstrained<DataRate>;
+extern template class FieldTrialConstrained<DataSize>;
+extern template class FieldTrialConstrained<TimeDelta>;
+
 extern template class FieldTrialOptional<DataRate>;
 extern template class FieldTrialOptional<DataSize>;
 extern template class FieldTrialOptional<TimeDelta>;
diff --git a/rtc_base/experiments/field_trial_units_unittest.cc b/rtc_base/experiments/field_trial_units_unittest.cc
index 80771d9..57022c2 100644
--- a/rtc_base/experiments/field_trial_units_unittest.cc
+++ b/rtc_base/experiments/field_trial_units_unittest.cc
@@ -58,5 +58,25 @@
   EXPECT_EQ(*exp.max_buffer.GetOptional(), DataSize::bytes(8));
   EXPECT_EQ(exp.period.Get(), TimeDelta::ms(300));
 }
+TEST(FieldTrialParserUnitsTest, IgnoresOutOfRange) {
+  FieldTrialConstrained<DataRate> rate("r", DataRate::kbps(30),
+                                       DataRate::kbps(10), DataRate::kbps(100));
+  FieldTrialConstrained<TimeDelta> delta("d", TimeDelta::ms(30),
+                                         TimeDelta::ms(10), TimeDelta::ms(100));
+  FieldTrialConstrained<DataSize> size(
+      "s", DataSize::bytes(30), DataSize::bytes(10), DataSize::bytes(100));
+  ParseFieldTrial({&rate, &delta, &size}, "r:0,d:0,s:0");
+  EXPECT_EQ(rate->kbps(), 30);
+  EXPECT_EQ(delta->ms(), 30);
+  EXPECT_EQ(size->bytes(), 30);
+  ParseFieldTrial({&rate, &delta, &size}, "r:300,d:300,s:300");
+  EXPECT_EQ(rate->kbps(), 30);
+  EXPECT_EQ(delta->ms(), 30);
+  EXPECT_EQ(size->bytes(), 30);
+  ParseFieldTrial({&rate, &delta, &size}, "r:50,d:50,s:50");
+  EXPECT_EQ(rate->kbps(), 50);
+  EXPECT_EQ(delta->ms(), 50);
+  EXPECT_EQ(size->bytes(), 50);
+}
 
 }  // namespace webrtc
diff --git a/rtc_base/experiments/quality_scaling_experiment.h b/rtc_base/experiments/quality_scaling_experiment.h
index 80a25ef..14833c0 100644
--- a/rtc_base/experiments/quality_scaling_experiment.h
+++ b/rtc_base/experiments/quality_scaling_experiment.h
@@ -12,7 +12,6 @@
 
 #include "absl/types/optional.h"
 #include "api/video_codecs/video_encoder.h"
-#include "common_types.h"  // NOLINT(build/include)
 
 namespace webrtc {
 class QualityScalingExperiment {
diff --git a/rtc_base/filerotatingstream.cc b/rtc_base/filerotatingstream.cc
index d03ab39..b1dc5ff 100644
--- a/rtc_base/filerotatingstream.cc
+++ b/rtc_base/filerotatingstream.cc
@@ -21,6 +21,7 @@
 #else
 #include <dirent.h>
 #include <sys/stat.h>
+#include <unistd.h>
 #endif  // WEBRTC_WIN
 
 #include "absl/strings/match.h"
diff --git a/rtc_base/httpcommon.cc b/rtc_base/httpcommon.cc
index 7926f88..4fa5f41 100644
--- a/rtc_base/httpcommon.cc
+++ b/rtc_base/httpcommon.cc
@@ -38,7 +38,7 @@
 
 namespace rtc {
 namespace {
-#if defined(WEBRTC_WIN)
+#if defined(WEBRTC_WIN) && !defined(WINUWP)
 ///////////////////////////////////////////////////////////////////////////////
 // ConstantToLabel can be used to easily generate string names from constant
 // values.  This can be useful for logging descriptive names of error messages.
@@ -113,7 +113,7 @@
     LASTLABEL};
 #undef KLABEL
 #undef LASTLABEL
-#endif  // defined(WEBRTC_WIN)
+#endif  // defined(WEBRTC_WIN) && !defined(WINUWP)
 
 typedef std::pair<std::string, std::string> HttpAttribute;
 typedef std::vector<HttpAttribute> HttpAttributeList;
@@ -225,7 +225,7 @@
   return result;
 }
 
-#if defined(WEBRTC_WIN)
+#if defined(WEBRTC_WIN) && !defined(WINUWP)
 struct NegotiateAuthContext : public HttpAuthContext {
   CredHandle cred;
   CtxtHandle ctx;
@@ -244,7 +244,7 @@
     FreeCredentialsHandle(&cred);
   }
 };
-#endif  // WEBRTC_WIN
+#endif  // defined(WEBRTC_WIN) && !defined(WINUWP)
 
 }  // anonymous namespace
 
@@ -359,7 +359,7 @@
     return HAR_RESPONSE;
   }
 
-#if defined(WEBRTC_WIN)
+#if defined(WEBRTC_WIN) && !defined(WINUWP)
 #if 1
   bool want_negotiate = absl::EqualsIgnoreCase(auth_method, "negotiate");
   bool want_ntlm = absl::EqualsIgnoreCase(auth_method, "ntlm");
@@ -544,7 +544,7 @@
     return HAR_RESPONSE;
   }
 #endif
-#endif  // WEBRTC_WIN
+#endif  // defined(WEBRTC_WIN) && !defined(WINUWP)
 
   return HAR_IGNORE;
 }
diff --git a/rtc_base/logging.cc b/rtc_base/logging.cc
index bb4fbfa..8d6afbc 100644
--- a/rtc_base/logging.cc
+++ b/rtc_base/logging.cc
@@ -327,7 +327,7 @@
     }
   }
 
-#if defined(WEBRTC_WIN)
+#if defined(WEBRTC_WIN) && !defined(WINUWP)
   if ((LS_NONE != debug_level) && !::IsDebuggerPresent()) {
     // First, attempt to attach to our parent's console... so if you invoke
     // from the command line, we'll see the output there.  Otherwise, create
@@ -336,7 +336,7 @@
     if (!AttachConsole(ATTACH_PARENT_PROCESS))
       ::AllocConsole();
   }
-#endif  // WEBRTC_WIN
+#endif  // defined(WEBRTC_WIN) && !defined(WINUWP)
 
   LogToDebug(debug_level);
 }
diff --git a/rtc_base/logging.h b/rtc_base/logging.h
index c15c37a..c7d083e 100644
--- a/rtc_base/logging.h
+++ b/rtc_base/logging.h
@@ -519,10 +519,10 @@
       ? static_cast<void>(0)               \
       : rtc::webrtc_logging_impl::LogMessageVoidify()&
 
-#define RTC_LOG_FILE_LINE(sev, file, line)                                     \
-  rtc::webrtc_logging_impl::LogCall() &                                        \
-      rtc::webrtc_logging_impl::LogStreamer<>()                                \
-          << rtc::webrtc_logging_impl::LogMetadata(__FILE__, __LINE__, sev)
+#define RTC_LOG_FILE_LINE(sev, file, line)      \
+  rtc::webrtc_logging_impl::LogCall() &         \
+      rtc::webrtc_logging_impl::LogStreamer<>() \
+          << rtc::webrtc_logging_impl::LogMetadata(file, line, sev)
 
 #define RTC_LOG(sev) RTC_LOG_FILE_LINE(rtc::sev, __FILE__, __LINE__)
 
diff --git a/rtc_base/nethelpers.cc b/rtc_base/nethelpers.cc
index 81cd1af..1bba3b4 100644
--- a/rtc_base/nethelpers.cc
+++ b/rtc_base/nethelpers.cc
@@ -161,7 +161,10 @@
 }
 
 bool HasIPv6Enabled() {
-#if defined(WEBRTC_WIN)
+#if defined(WINUWP)
+  // WinUWP always has IPv6 capability.
+  return true;
+#elif defined(WEBRTC_WIN)
   if (IsWindowsVistaOrLater()) {
     return true;
   }
diff --git a/rtc_base/proxy_unittest.cc b/rtc_base/proxy_unittest.cc
index f42039f..0101893 100644
--- a/rtc_base/proxy_unittest.cc
+++ b/rtc_base/proxy_unittest.cc
@@ -18,7 +18,6 @@
 #include "rtc_base/virtualsocketserver.h"
 
 using rtc::Socket;
-using rtc::Thread;
 using rtc::SocketAddress;
 
 static const SocketAddress kSocksProxyIntAddr("1.2.3.4", 1080);
@@ -49,7 +48,8 @@
       socket, kSocksProxyIntAddr, "", rtc::CryptString());
   // TODO: IPv6-ize these tests when proxy supports IPv6.
 
-  rtc::TestEchoServer server(Thread::Current(), SocketAddress(INADDR_ANY, 0));
+  rtc::TestEchoServer server(rtc::Thread::Current(),
+                             SocketAddress(INADDR_ANY, 0));
 
   std::unique_ptr<rtc::AsyncTCPSocket> packet_socket(
       rtc::AsyncTCPSocket::Create(proxy_socket, SocketAddress(INADDR_ANY, 0),
diff --git a/rtc_base/sanitizer.h b/rtc_base/sanitizer.h
index a9eccfc..8af0824 100644
--- a/rtc_base/sanitizer.h
+++ b/rtc_base/sanitizer.h
@@ -14,7 +14,7 @@
 #include <stddef.h>  // For size_t.
 
 #ifdef __cplusplus
-#include <type_traits>
+#include "absl/meta/type_traits.h"
 #endif
 
 #if defined(__has_feature)
@@ -98,10 +98,10 @@
 
 template <typename T>
 constexpr bool IsTriviallyCopyable() {
-  return static_cast<bool>(std::is_trivially_copy_constructible<T>::value &&
-                           (std::is_trivially_copy_assignable<T>::value ||
+  return static_cast<bool>(absl::is_trivially_copy_constructible<T>::value &&
+                           (absl::is_trivially_copy_assignable<T>::value ||
                             !std::is_copy_assignable<T>::value) &&
-                           std::is_trivially_destructible<T>::value);
+                           absl::is_trivially_destructible<T>::value);
 }
 
 }  // namespace sanitizer_impl
diff --git a/rtc_base/scoped_ref_ptr.h b/rtc_base/scoped_ref_ptr.h
index a583aa9..b961ff5 100644
--- a/rtc_base/scoped_ref_ptr.h
+++ b/rtc_base/scoped_ref_ptr.h
@@ -8,154 +8,12 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-// Originally these classes are from Chromium.
-// http://src.chromium.org/viewvc/chrome/trunk/src/base/memory/ref_counted.h?view=markup
-
-//
-// A smart pointer class for reference counted objects.  Use this class instead
-// of calling AddRef and Release manually on a reference counted object to
-// avoid common memory leaks caused by forgetting to Release an object
-// reference.  Sample usage:
-//
-//   class MyFoo : public RefCounted<MyFoo> {
-//    ...
-//   };
-//
-//   void some_function() {
-//     scoped_refptr<MyFoo> foo = new MyFoo();
-//     foo->Method(param);
-//     // |foo| is released when this function returns
-//   }
-//
-//   void some_other_function() {
-//     scoped_refptr<MyFoo> foo = new MyFoo();
-//     ...
-//     foo = nullptr;  // explicitly releases |foo|
-//     ...
-//     if (foo)
-//       foo->Method(param);
-//   }
-//
-// The above examples show how scoped_refptr<T> acts like a pointer to T.
-// Given two scoped_refptr<T> classes, it is also possible to exchange
-// references between the two objects, like so:
-//
-//   {
-//     scoped_refptr<MyFoo> a = new MyFoo();
-//     scoped_refptr<MyFoo> b;
-//
-//     b.swap(a);
-//     // now, |b| references the MyFoo object, and |a| references null.
-//   }
-//
-// To make both |a| and |b| in the above example reference the same MyFoo
-// object, simply use the assignment operator:
-//
-//   {
-//     scoped_refptr<MyFoo> a = new MyFoo();
-//     scoped_refptr<MyFoo> b;
-//
-//     b = a;
-//     // now, |a| and |b| each own a reference to the same MyFoo object.
-//   }
-//
-
 #ifndef RTC_BASE_SCOPED_REF_PTR_H_
 #define RTC_BASE_SCOPED_REF_PTR_H_
 
-#include <memory>
+// TODO(bugs.webrtc.org/9887): This is a forward header for backwards
+// compatibility. Remove when downstream clients are updated.
 
-namespace rtc {
-
-template <class T>
-class scoped_refptr {
- public:
-  scoped_refptr() : ptr_(nullptr) {}
-
-  scoped_refptr(T* p) : ptr_(p) {
-    if (ptr_)
-      ptr_->AddRef();
-  }
-
-  scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) {
-    if (ptr_)
-      ptr_->AddRef();
-  }
-
-  template <typename U>
-  scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
-    if (ptr_)
-      ptr_->AddRef();
-  }
-
-  // Move constructors.
-  scoped_refptr(scoped_refptr<T>&& r) : ptr_(r.release()) {}
-
-  template <typename U>
-  scoped_refptr(scoped_refptr<U>&& r) : ptr_(r.release()) {}
-
-  ~scoped_refptr() {
-    if (ptr_)
-      ptr_->Release();
-  }
-
-  T* get() const { return ptr_; }
-  operator T*() const { return ptr_; }
-  T* operator->() const { return ptr_; }
-
-  // Returns the (possibly null) raw pointer, and makes the scoped_refptr hold a
-  // null pointer, all without touching the reference count of the underlying
-  // pointed-to object. The object is still reference counted, and the caller of
-  // release() is now the proud owner of one reference, so it is responsible for
-  // calling Release() once on the object when no longer using it.
-  T* release() {
-    T* retVal = ptr_;
-    ptr_ = nullptr;
-    return retVal;
-  }
-
-  scoped_refptr<T>& operator=(T* p) {
-    // AddRef first so that self assignment should work
-    if (p)
-      p->AddRef();
-    if (ptr_)
-      ptr_->Release();
-    ptr_ = p;
-    return *this;
-  }
-
-  scoped_refptr<T>& operator=(const scoped_refptr<T>& r) {
-    return *this = r.ptr_;
-  }
-
-  template <typename U>
-  scoped_refptr<T>& operator=(const scoped_refptr<U>& r) {
-    return *this = r.get();
-  }
-
-  scoped_refptr<T>& operator=(scoped_refptr<T>&& r) {
-    scoped_refptr<T>(std::move(r)).swap(*this);
-    return *this;
-  }
-
-  template <typename U>
-  scoped_refptr<T>& operator=(scoped_refptr<U>&& r) {
-    scoped_refptr<T>(std::move(r)).swap(*this);
-    return *this;
-  }
-
-  void swap(T** pp) {
-    T* p = ptr_;
-    ptr_ = *pp;
-    *pp = p;
-  }
-
-  void swap(scoped_refptr<T>& r) { swap(&r.ptr_); }
-
- protected:
-  T* ptr_;
-};
-
-}  // namespace rtc
+#include "api/scoped_refptr.h"
 
 #endif  // RTC_BASE_SCOPED_REF_PTR_H_
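
rtc_base/scoped_ref_ptr.h is now only a forwarding header; the class itself lives in api/scoped_refptr.h (still in namespace rtc), so existing includes keep compiling while new code can include the api header directly. Usage is unchanged; MyFoo below is a placeholder ref-counted type (anything exposing AddRef()/Release()):

  #include "api/scoped_refptr.h"

  void SomeFunction(MyFoo* raw) {
    rtc::scoped_refptr<MyFoo> foo = raw;  // Takes a reference on |raw|.
    foo->Method();
    // The reference is released when |foo| goes out of scope.
  }
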
diff --git a/rtc_base/socketadapters.cc b/rtc_base/socketadapters.cc
index 9451928..3bac17b 100644
--- a/rtc_base/socketadapters.cc
+++ b/rtc_base/socketadapters.cc
@@ -427,7 +427,7 @@
         // std::string msg("Please report the following information to
         // foo@bar.com:\r\nUnknown methods: ");
         msg.append(unknown_mechanisms_);
-#if defined(WEBRTC_WIN)
+#if defined(WEBRTC_WIN) && !defined(WINUWP)
         MessageBoxA(0, msg.c_str(), "Oops!", MB_OK);
 #endif
 #if defined(WEBRTC_POSIX)
diff --git a/rtc_base/synchronization/rw_lock_win.cc b/rtc_base/synchronization/rw_lock_win.cc
index 44cc0a7..a0d24a3 100644
--- a/rtc_base/synchronization/rw_lock_win.cc
+++ b/rtc_base/synchronization/rw_lock_win.cc
@@ -14,23 +14,19 @@
 
 namespace webrtc {
 
-static bool native_rw_locks_supported = false;
-static bool module_load_attempted = false;
-static HMODULE library = NULL;
+typedef void(WINAPI* PInitializeSRWLock)(PSRWLOCK);
 
-typedef void(WINAPI* InitializeSRWLock)(PSRWLOCK);
+typedef void(WINAPI* PAcquireSRWLockExclusive)(PSRWLOCK);
+typedef void(WINAPI* PReleaseSRWLockExclusive)(PSRWLOCK);
 
-typedef void(WINAPI* AcquireSRWLockExclusive)(PSRWLOCK);
-typedef void(WINAPI* ReleaseSRWLockExclusive)(PSRWLOCK);
+typedef void(WINAPI* PAcquireSRWLockShared)(PSRWLOCK);
+typedef void(WINAPI* PReleaseSRWLockShared)(PSRWLOCK);
 
-typedef void(WINAPI* AcquireSRWLockShared)(PSRWLOCK);
-typedef void(WINAPI* ReleaseSRWLockShared)(PSRWLOCK);
-
-InitializeSRWLock initialize_srw_lock;
-AcquireSRWLockExclusive acquire_srw_lock_exclusive;
-AcquireSRWLockShared acquire_srw_lock_shared;
-ReleaseSRWLockShared release_srw_lock_shared;
-ReleaseSRWLockExclusive release_srw_lock_exclusive;
+PInitializeSRWLock initialize_srw_lock;
+PAcquireSRWLockExclusive acquire_srw_lock_exclusive;
+PAcquireSRWLockShared acquire_srw_lock_shared;
+PReleaseSRWLockShared release_srw_lock_shared;
+PReleaseSRWLockExclusive release_srw_lock_exclusive;
 
 RWLockWin::RWLockWin() {
   initialize_srw_lock(&lock_);
@@ -60,28 +56,31 @@
 }
 
 bool RWLockWin::LoadModule() {
+  static bool module_load_attempted = false;
+  static bool native_rw_locks_supported = false;
   if (module_load_attempted) {
     return native_rw_locks_supported;
   }
   module_load_attempted = true;
+#if !defined(WINUWP)
   // Use native implementation if supported (i.e Vista+)
-  library = LoadLibrary(TEXT("Kernel32.dll"));
+  static HMODULE library = LoadLibrary(TEXT("Kernel32.dll"));
   if (!library) {
     return false;
   }
   RTC_LOG(LS_VERBOSE) << "Loaded Kernel.dll";
 
   initialize_srw_lock =
-      (InitializeSRWLock)GetProcAddress(library, "InitializeSRWLock");
+      (PInitializeSRWLock)GetProcAddress(library, "InitializeSRWLock");
 
-  acquire_srw_lock_exclusive = (AcquireSRWLockExclusive)GetProcAddress(
+  acquire_srw_lock_exclusive = (PAcquireSRWLockExclusive)GetProcAddress(
       library, "AcquireSRWLockExclusive");
-  release_srw_lock_exclusive = (ReleaseSRWLockExclusive)GetProcAddress(
+  release_srw_lock_exclusive = (PReleaseSRWLockExclusive)GetProcAddress(
       library, "ReleaseSRWLockExclusive");
   acquire_srw_lock_shared =
-      (AcquireSRWLockShared)GetProcAddress(library, "AcquireSRWLockShared");
+      (PAcquireSRWLockShared)GetProcAddress(library, "AcquireSRWLockShared");
   release_srw_lock_shared =
-      (ReleaseSRWLockShared)GetProcAddress(library, "ReleaseSRWLockShared");
+      (PReleaseSRWLockShared)GetProcAddress(library, "ReleaseSRWLockShared");
 
   if (initialize_srw_lock && acquire_srw_lock_exclusive &&
       release_srw_lock_exclusive && acquire_srw_lock_shared &&
@@ -89,6 +88,18 @@
     RTC_LOG(LS_VERBOSE) << "Loaded Native RW Lock";
     native_rw_locks_supported = true;
   }
+#else
+  // On WinUWP the symbols loaded from this library are directly present
+  // in the headers and thus loading the library is not required (and
+  // manually loading libraries is restricted due to WinUWP sandboxing).
+  initialize_srw_lock = InitializeSRWLock;
+  acquire_srw_lock_exclusive = AcquireSRWLockExclusive;
+  release_srw_lock_exclusive = ReleaseSRWLockExclusive;
+  acquire_srw_lock_shared = AcquireSRWLockShared;
+  release_srw_lock_shared = ReleaseSRWLockShared;
+
+  native_rw_locks_supported = true;
+#endif  // !defined(WINUWP)
   return native_rw_locks_supported;
 }
 
diff --git a/rtc_base/task_queue_stdlib.cc b/rtc_base/task_queue_stdlib.cc
new file mode 100644
index 0000000..0fb0ed2
--- /dev/null
+++ b/rtc_base/task_queue_stdlib.cc
@@ -0,0 +1,399 @@
+/*
+ *  Copyright 2018 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/task_queue.h"
+
+#include <string.h>
+#include <algorithm>
+#include <atomic>
+#include <condition_variable>
+#include <map>
+#include <queue>
+#include <utility>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/criticalsection.h"
+#include "rtc_base/event.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/platform_thread.h"
+#include "rtc_base/refcount.h"
+#include "rtc_base/refcountedobject.h"
+#include "rtc_base/thread_annotations.h"
+#include "rtc_base/timeutils.h"
+
+namespace rtc {
+namespace {
+
+using Priority = TaskQueue::Priority;
+
+ThreadPriority TaskQueuePriorityToThreadPriority(Priority priority) {
+  switch (priority) {
+    case Priority::HIGH:
+      return kRealtimePriority;
+    case Priority::LOW:
+      return kLowPriority;
+    case Priority::NORMAL:
+      return kNormalPriority;
+    default:
+      RTC_NOTREACHED();
+      return kNormalPriority;
+  }
+  return kNormalPriority;
+}
+
+}  // namespace
+
+class TaskQueue::Impl : public RefCountInterface {
+ public:
+  Impl(const char* queue_name, TaskQueue* queue, Priority priority);
+  ~Impl() override;
+
+  static TaskQueue::Impl* Current();
+  static TaskQueue* CurrentQueue();
+
+  // Used for DCHECKing the current queue.
+  bool IsCurrent() const;
+
+  template <class Closure,
+            typename std::enable_if<!std::is_convertible<
+                Closure,
+                std::unique_ptr<QueuedTask>>::value>::type* = nullptr>
+  void PostTask(Closure&& closure) {
+    PostTask(NewClosure(std::forward<Closure>(closure)));
+  }
+
+  void PostTask(std::unique_ptr<QueuedTask> task);
+  void PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                        std::unique_ptr<QueuedTask> reply,
+                        TaskQueue::Impl* reply_queue);
+
+  void PostDelayedTask(std::unique_ptr<QueuedTask> task, uint32_t milliseconds);
+
+  class WorkerThread : public PlatformThread {
+   public:
+    WorkerThread(ThreadRunFunction func,
+                 void* obj,
+                 const char* thread_name,
+                 ThreadPriority priority)
+        : PlatformThread(func, obj, thread_name, priority) {}
+  };
+
+  using OrderId = uint64_t;
+
+  struct DelayedEntryTimeout {
+    int64_t next_fire_at_ms_{};
+    OrderId order_{};
+
+    bool operator<(const DelayedEntryTimeout& o) const {
+      return std::tie(next_fire_at_ms_, order_) <
+             std::tie(o.next_fire_at_ms_, o.order_);
+    }
+  };
+
+  struct NextTask {
+    bool final_task_{false};
+    std::unique_ptr<QueuedTask> run_task_;
+    int64_t sleep_time_ms_{};
+  };
+
+ protected:
+  NextTask GetNextTask();
+
+ private:
+  // Holds the per-thread pointer used by TaskQueue::Current(): on the
+  // worker thread owning this queue the thread_local points at the
+  // queue's Impl, so Current() can return the active task queue.
+  static thread_local TaskQueue::Impl* thread_context_;
+
+  static void ThreadMain(void* context);
+
+  void ProcessTasks();
+
+  void NotifyWake();
+
+  // The back pointer from the owner task queue object
+  // from this implementation detail.
+  TaskQueue* const queue_;
+
+  // Indicates if the thread has started.
+  Event started_;
+
+  // Indicates if the thread has stopped.
+  Event stopped_;
+
+  // Signaled whenever a new task is pending.
+  Event flag_notify_;
+
+  // Contains the active worker thread assigned to processing
+  // tasks (including delayed tasks).
+  WorkerThread thread_;
+
+  rtc::CriticalSection pending_lock_;
+
+  // Indicates if the worker thread needs to shutdown now.
+  bool thread_should_quit_ RTC_GUARDED_BY(pending_lock_){false};
+
+  // Holds the next order to use for the next task to be
+  // put into one of the pending queues.
+  OrderId thread_posting_order_ RTC_GUARDED_BY(pending_lock_){};
+
+  // The list of all pending tasks that need to be processed in the
+  // FIFO queue ordering on the worker thread.
+  std::queue<std::pair<OrderId, std::unique_ptr<QueuedTask>>> pending_queue_
+      RTC_GUARDED_BY(pending_lock_);
+
+  // The list of all pending tasks that need to be processed at a future
+  // time based upon a delay. On the off chance that a delayed task fires
+  // at exactly the same time as another task, the tasks are processed in
+  // FIFO ordering. std::priority_queue was
+  // considered but rejected due to its inability to extract the
+  // std::unique_ptr out of the queue without the presence of a hack.
+  std::map<DelayedEntryTimeout, std::unique_ptr<QueuedTask>> delayed_queue_
+      RTC_GUARDED_BY(pending_lock_);
+};
+
+// static
+thread_local TaskQueue::Impl* TaskQueue::Impl::thread_context_ = nullptr;
+
+TaskQueue::Impl::Impl(const char* queue_name,
+                      TaskQueue* queue,
+                      Priority priority)
+    : queue_(queue),
+      started_(/*manual_reset=*/false, /*initially_signaled=*/false),
+      stopped_(/*manual_reset=*/false, /*initially_signaled=*/false),
+      flag_notify_(/*manual_reset=*/false, /*initially_signaled=*/false),
+      thread_(&TaskQueue::Impl::ThreadMain,
+              this,
+              queue_name,
+              TaskQueuePriorityToThreadPriority(priority)) {
+  RTC_DCHECK(queue_name);
+  thread_.Start();
+  started_.Wait(Event::kForever);
+}
+
+TaskQueue::Impl::~Impl() {
+  RTC_DCHECK(!IsCurrent());
+
+  {
+    CritScope lock(&pending_lock_);
+    thread_should_quit_ = true;
+  }
+
+  NotifyWake();
+
+  stopped_.Wait(Event::kForever);
+  thread_.Stop();
+}
+
+// static
+TaskQueue::Impl* TaskQueue::Impl::Current() {
+  return thread_context_;
+}
+
+// static
+TaskQueue* TaskQueue::Impl::CurrentQueue() {
+  TaskQueue::Impl* current = Current();
+  return current ? current->queue_ : nullptr;
+}
+
+bool TaskQueue::Impl::IsCurrent() const {
+  return IsThreadRefEqual(thread_.GetThreadRef(), CurrentThreadRef());
+}
+
+void TaskQueue::Impl::PostTask(std::unique_ptr<QueuedTask> task) {
+  {
+    CritScope lock(&pending_lock_);
+    OrderId order = thread_posting_order_++;
+
+    pending_queue_.push(std::pair<OrderId, std::unique_ptr<QueuedTask>>(
+        order, std::move(task)));
+  }
+
+  NotifyWake();
+}
+
+void TaskQueue::Impl::PostDelayedTask(std::unique_ptr<QueuedTask> task,
+                                      uint32_t milliseconds) {
+  auto fire_at = rtc::TimeMillis() + milliseconds;
+
+  DelayedEntryTimeout delay;
+  delay.next_fire_at_ms_ = fire_at;
+
+  {
+    CritScope lock(&pending_lock_);
+    delay.order_ = ++thread_posting_order_;
+    delayed_queue_[delay] = std::move(task);
+  }
+
+  NotifyWake();
+}
+
+void TaskQueue::Impl::PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                                       std::unique_ptr<QueuedTask> reply,
+                                       TaskQueue::Impl* reply_queue) {
+  QueuedTask* task_ptr = task.release();
+  QueuedTask* reply_task_ptr = reply.release();
+  PostTask([task_ptr, reply_task_ptr, reply_queue]() {
+    if (task_ptr->Run())
+      delete task_ptr;
+
+    reply_queue->PostTask(std::unique_ptr<QueuedTask>(reply_task_ptr));
+  });
+}
+
+TaskQueue::Impl::NextTask TaskQueue::Impl::GetNextTask() {
+  NextTask result{};
+
+  auto tick = rtc::TimeMillis();
+
+  CritScope lock(&pending_lock_);
+
+  if (thread_should_quit_) {
+    result.final_task_ = true;
+    return result;
+  }
+
+  if (delayed_queue_.size() > 0) {
+    auto delayed_entry = delayed_queue_.begin();
+    const auto& delay_info = delayed_entry->first;
+    auto& delay_run = delayed_entry->second;
+    if (tick >= delay_info.next_fire_at_ms_) {
+      if (pending_queue_.size() > 0) {
+        auto& entry = pending_queue_.front();
+        auto& entry_order = entry.first;
+        auto& entry_run = entry.second;
+        if (entry_order < delay_info.order_) {
+          result.run_task_ = std::move(entry_run);
+          pending_queue_.pop();
+          return result;
+        }
+      }
+
+      result.run_task_ = std::move(delay_run);
+      delayed_queue_.erase(delayed_entry);
+      return result;
+    }
+
+    result.sleep_time_ms_ = delay_info.next_fire_at_ms_ - tick;
+  }
+
+  if (pending_queue_.size() > 0) {
+    auto& entry = pending_queue_.front();
+    result.run_task_ = std::move(entry.second);
+    pending_queue_.pop();
+  }
+
+  return result;
+}
+
+// static
+void TaskQueue::Impl::ThreadMain(void* context) {
+  TaskQueue::Impl* me = static_cast<TaskQueue::Impl*>(context);
+  me->ProcessTasks();
+}
+
+void TaskQueue::Impl::ProcessTasks() {
+  thread_context_ = this;
+  started_.Set();
+
+  while (true) {
+    auto task = GetNextTask();
+
+    if (task.final_task_)
+      break;
+
+    if (task.run_task_) {
+      // process entry immediately then try again
+      QueuedTask* release_ptr = task.run_task_.release();
+      if (release_ptr->Run())
+        delete release_ptr;
+
+      // attempt to sleep again
+      continue;
+    }
+
+    if (0 == task.sleep_time_ms_)
+      flag_notify_.Wait(Event::kForever);
+    else
+      flag_notify_.Wait(task.sleep_time_ms_);
+  }
+
+  stopped_.Set();
+}
+
+void TaskQueue::Impl::NotifyWake() {
+  // The queue holds pending tasks to complete. Either tasks are to be
+  // executed immediately or tasks are to be run at some future delayed time.
+  // For immediate tasks the task queue's thread is busy running the task and
+  // the thread will not be waiting on the flag_notify_ event. If no immediate
+  // tasks are available but a delayed task is pending then the thread will be
+  // waiting on flag_notify_ with a delayed time-out of the nearest timed task
+  // to run. If no immediate or pending tasks are available, the thread will
+  // wait on flag_notify_ until signaled that a task has been added (or the
+  // thread to be told to shutdown).
+
+  // In all cases, when a new immediate task, delayed task, or request to
+  // shutdown the thread is added the flag_notify_ is signaled after. If the
+  // thread was waiting then the thread will wake up immediately and re-assess
+  // what task needs to be run next (i.e. run a task now, wait for the nearest
+  // timed delayed task, or shutdown the thread). If the thread was not waiting
+  // then the event will remain signaled, so the thread wakes up the next
+  // time it attempts to wait on the flag_notify_ event.
+
+  // Any immediate or delayed pending task (or request to shutdown the thread)
+  // must always be added to the queue prior to signaling flag_notify_ to wake
+  // up the possibly sleeping thread. This prevents a race condition where the
+  // thread is notified to wake up but the task queue's thread finds nothing to
+  // do so it waits once again to be signaled where such a signal may never
+  // happen.
+  flag_notify_.Set();
+}
+
+// Boilerplate for the PIMPL pattern.
+TaskQueue::TaskQueue(const char* queue_name, Priority priority)
+    : impl_(new RefCountedObject<TaskQueue::Impl>(queue_name, this, priority)) {
+}
+
+TaskQueue::~TaskQueue() {}
+
+// static
+TaskQueue* TaskQueue::Current() {
+  return TaskQueue::Impl::CurrentQueue();
+}
+
+// Used for DCHECKing the current queue.
+bool TaskQueue::IsCurrent() const {
+  return impl_->IsCurrent();
+}
+
+void TaskQueue::PostTask(std::unique_ptr<QueuedTask> task) {
+  return TaskQueue::impl_->PostTask(std::move(task));
+}
+
+void TaskQueue::PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                                 std::unique_ptr<QueuedTask> reply,
+                                 TaskQueue* reply_queue) {
+  return TaskQueue::impl_->PostTaskAndReply(std::move(task), std::move(reply),
+                                            reply_queue->impl_.get());
+}
+
+void TaskQueue::PostTaskAndReply(std::unique_ptr<QueuedTask> task,
+                                 std::unique_ptr<QueuedTask> reply) {
+  return TaskQueue::impl_->PostTaskAndReply(std::move(task), std::move(reply),
+                                            impl_.get());
+}
+
+void TaskQueue::PostDelayedTask(std::unique_ptr<QueuedTask> task,
+                                uint32_t milliseconds) {
+  return TaskQueue::impl_->PostDelayedTask(std::move(task), milliseconds);
+}
+
+}  // namespace rtc
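
The new task_queue_stdlib.cc backend is selected for winuwp builds via the rtc_task_queue_impl rule earlier in this change; the public rtc::TaskQueue API is untouched. A usage sketch, assuming the lambda-accepting PostTask/PostDelayedTask overloads from rtc_base/task_queue.h:

  #include "rtc_base/task_queue.h"

  void Example() {
    rtc::TaskQueue queue("example", rtc::TaskQueue::Priority::NORMAL);
    queue.PostTask([] { /* runs on the queue's worker thread, FIFO order */ });
    queue.PostDelayedTask([] { /* runs roughly 100 ms later */ }, 100);
    // Destroying |queue| stops the worker thread; unrun tasks are deleted.
  }
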
diff --git a/rtc_base/testutils.h b/rtc_base/testutils.h
index 6b3733f..ac74203 100644
--- a/rtc_base/testutils.h
+++ b/rtc_base/testutils.h
@@ -19,7 +19,6 @@
 #include <vector>
 #include "rtc_base/asyncsocket.h"
 #include "rtc_base/checks.h"
-#include "rtc_base/gunit.h"
 #include "rtc_base/stream.h"
 
 namespace webrtc {
diff --git a/rtc_base/timeutils.cc b/rtc_base/timeutils.cc
index dc5b611..0f79a87 100644
--- a/rtc_base/timeutils.cc
+++ b/rtc_base/timeutils.cc
@@ -45,6 +45,104 @@
   return g_clock;
 }
 
+#if defined(WINUWP)
+
+namespace {
+
+class TimeHelper final {
+ public:
+  TimeHelper(const TimeHelper&) = delete;
+
+  // Resets the clock based upon an NTP server. This routine must be called
+  // prior to the main system start-up to ensure all clocks are based upon
+  // an NTP server time if NTP synchronization is required. No critical
+  // section is used thus this method must be called prior to any clock
+  // routines being used.
+  static void SyncWithNtp(int64_t ntp_server_time_ms) {
+    auto& singleton = Singleton();
+    TIME_ZONE_INFORMATION time_zone;
+    GetTimeZoneInformation(&time_zone);
+    int64_t time_zone_bias_ns =
+        rtc::dchecked_cast<int64_t>(time_zone.Bias) * 60 * 1000 * 1000 * 1000;
+    singleton.app_start_time_ns_ =
+        (ntp_server_time_ms - kNTPTimeToUnixTimeEpochOffset) * 1000000 -
+        time_zone_bias_ns;
+    singleton.UpdateReferenceTime();
+  }
+
+  // Returns the number of nanoseconds that have passed since unix epoch.
+  static int64_t TicksNs() {
+    auto& singleton = Singleton();
+    int64_t result = 0;
+    LARGE_INTEGER qpcnt;
+    QueryPerformanceCounter(&qpcnt);
+    result = rtc::dchecked_cast<int64_t>(
+        (rtc::dchecked_cast<uint64_t>(qpcnt.QuadPart) * 100000 /
+         rtc::dchecked_cast<uint64_t>(singleton.os_ticks_per_second_)) *
+        10000);
+    result = singleton.app_start_time_ns_ + result -
+             singleton.time_since_os_start_ns_;
+    return result;
+  }
+
+ private:
+  TimeHelper() {
+    TIME_ZONE_INFORMATION time_zone;
+    GetTimeZoneInformation(&time_zone);
+    int64_t time_zone_bias_ns =
+        rtc::dchecked_cast<int64_t>(time_zone.Bias) * 60 * 1000 * 1000 * 1000;
+    FILETIME ft;
+    // This will give us system file in UTC format.
+    GetSystemTimeAsFileTime(&ft);
+    LARGE_INTEGER li;
+    li.HighPart = ft.dwHighDateTime;
+    li.LowPart = ft.dwLowDateTime;
+
+    app_start_time_ns_ = (li.QuadPart - kFileTimeToUnixTimeEpochOffset) * 100 -
+                         time_zone_bias_ns;
+
+    UpdateReferenceTime();
+  }
+
+  static TimeHelper& Singleton() {
+    static TimeHelper singleton;
+    return singleton;
+  }
+
+  void UpdateReferenceTime() {
+    LARGE_INTEGER qpfreq;
+    QueryPerformanceFrequency(&qpfreq);
+    os_ticks_per_second_ = rtc::dchecked_cast<int64_t>(qpfreq.QuadPart);
+
+    LARGE_INTEGER qpcnt;
+    QueryPerformanceCounter(&qpcnt);
+    time_since_os_start_ns_ = rtc::dchecked_cast<int64_t>(
+        (rtc::dchecked_cast<uint64_t>(qpcnt.QuadPart) * 100000 /
+         rtc::dchecked_cast<uint64_t>(os_ticks_per_second_)) *
+        10000);
+  }
+
+ private:
+  static constexpr uint64_t kFileTimeToUnixTimeEpochOffset =
+      116444736000000000ULL;
+  static constexpr uint64_t kNTPTimeToUnixTimeEpochOffset = 2208988800000L;
+
+  // The number of nanoseconds since unix system epoch
+  int64_t app_start_time_ns_;
+  // The number of nanoseconds since the OS started
+  int64_t time_since_os_start_ns_;
+  // The OS calculated ticks per second
+  int64_t os_ticks_per_second_;
+};
+
+}  // namespace
+
+void SyncWithNtp(int64_t time_from_ntp_server_ms) {
+  TimeHelper::SyncWithNtp(time_from_ntp_server_ms);
+}
+
+#endif  // defined(WINUWP)
+
 int64_t SystemTimeNanos() {
   int64_t ticks;
 #if defined(WEBRTC_MAC)
@@ -71,6 +169,8 @@
   clock_gettime(CLOCK_MONOTONIC, &ts);
   ticks = kNumNanosecsPerSec * static_cast<int64_t>(ts.tv_sec) +
           static_cast<int64_t>(ts.tv_nsec);
+#elif defined(WINUWP)
+  ticks = TimeHelper::TicksNs();
 #elif defined(WEBRTC_WIN)
   static volatile LONG last_timegettime = 0;
   static volatile int64_t num_wrap_timegettime = 0;
diff --git a/rtc_base/timeutils.h b/rtc_base/timeutils.h
index 4e38a03..c6ddd73 100644
--- a/rtc_base/timeutils.h
+++ b/rtc_base/timeutils.h
@@ -58,6 +58,12 @@
 // Returns previously set clock, or nullptr if no custom clock is being used.
 ClockInterface* GetClockForTesting();
 
+#if defined(WINUWP)
+// Synchronizes the current clock based upon an NTP server's epoch in
+// milliseconds.
+void SyncWithNtp(int64_t time_from_ntp_server_ms);
+#endif  // defined(WINUWP)
+
 // Returns the actual system time, even if a clock is set for testing.
 // Useful for timeouts while using a test clock, or for logging.
 int64_t SystemTimeNanos();
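
On WinUWP the clock is anchored to QueryPerformanceCounter at start-up and can optionally be re-anchored to an NTP server; per the comment in timeutils.cc, SyncWithNtp() must run before any other clock routine. A sketch of the call site (obtaining the NTP time is the application's responsibility and is not provided by this patch):

  #include "rtc_base/timeutils.h"

  #if defined(WINUWP)
  void InitClock(int64_t ntp_server_time_ms) {
    // Must happen before anything else touches rtc::TimeMillis() or
    // SystemTimeNanos().
    rtc::SyncWithNtp(ntp_server_time_ms);
  }
  #endif  // defined(WINUWP)
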
diff --git a/rtc_base/units/BUILD.gn b/rtc_base/units/BUILD.gn
new file mode 100644
index 0000000..8c722bb
--- /dev/null
+++ b/rtc_base/units/BUILD.gn
@@ -0,0 +1,37 @@
+# Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+import("../../webrtc.gni")
+
+rtc_source_set("unit_base") {
+  visibility = [
+    "../../api/units:*",
+    ":*",
+  ]
+  sources = [
+    "unit_base.h",
+  ]
+
+  deps = [
+    "../../rtc_base:checks",
+    "../../rtc_base:safe_conversions",
+  ]
+}
+
+if (rtc_include_tests) {
+  rtc_source_set("units_unittests") {
+    testonly = true
+    sources = [
+      "unit_base_unittest.cc",
+    ]
+    deps = [
+      ":unit_base",
+      "../../test:test_support",
+    ]
+  }
+}
diff --git a/rtc_base/units/unit_base.h b/rtc_base/units/unit_base.h
new file mode 100644
index 0000000..5503a32
--- /dev/null
+++ b/rtc_base/units/unit_base.h
@@ -0,0 +1,304 @@
+/*
+ *  Copyright 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+#ifndef RTC_BASE_UNITS_UNIT_BASE_H_
+#define RTC_BASE_UNITS_UNIT_BASE_H_
+
+#include <stdint.h>
+#include <algorithm>
+#include <cmath>
+#include <limits>
+#include <type_traits>
+
+#include "rtc_base/checks.h"
+#include "rtc_base/numerics/safe_conversions.h"
+
+namespace webrtc {
+namespace rtc_units_impl {
+
+// UnitBase is a base class for implementing custom value types with a specific
+// unit. It provides type safety and commonly useful operations. The underlying
+// storage is always an int64_t; it is up to the unit implementation to choose
+// what scale it represents.
+//
+// It's used like:
+// class MyUnit: public UnitBase<MyUnit> {...};
+//
+// Unit_T is the subclass representing the specific unit.
+template <class Unit_T>
+class UnitBase {
+ public:
+  UnitBase() = delete;
+  static constexpr Unit_T Zero() { return Unit_T(0); }
+  static constexpr Unit_T PlusInfinity() { return Unit_T(PlusInfinityVal()); }
+  static constexpr Unit_T MinusInfinity() { return Unit_T(MinusInfinityVal()); }
+
+  constexpr bool IsZero() const { return value_ == 0; }
+  constexpr bool IsFinite() const { return !IsInfinite(); }
+  constexpr bool IsInfinite() const {
+    return value_ == PlusInfinityVal() || value_ == MinusInfinityVal();
+  }
+  constexpr bool IsPlusInfinity() const { return value_ == PlusInfinityVal(); }
+  constexpr bool IsMinusInfinity() const {
+    return value_ == MinusInfinityVal();
+  }
+
+  constexpr bool operator==(const Unit_T& other) const {
+    return value_ == other.value_;
+  }
+  constexpr bool operator!=(const Unit_T& other) const {
+    return value_ != other.value_;
+  }
+  constexpr bool operator<=(const Unit_T& other) const {
+    return value_ <= other.value_;
+  }
+  constexpr bool operator>=(const Unit_T& other) const {
+    return value_ >= other.value_;
+  }
+  constexpr bool operator>(const Unit_T& other) const {
+    return value_ > other.value_;
+  }
+  constexpr bool operator<(const Unit_T& other) const {
+    return value_ < other.value_;
+  }
+
+ protected:
+  template <int64_t value>
+  static constexpr Unit_T FromStaticValue() {
+    static_assert(value >= 0 || !Unit_T::one_sided, "");
+    static_assert(value > MinusInfinityVal(), "");
+    static_assert(value < PlusInfinityVal(), "");
+    return Unit_T(value);
+  }
+
+  template <int64_t fraction_value, int64_t Denominator>
+  static constexpr Unit_T FromStaticFraction() {
+    static_assert(fraction_value >= 0 || !Unit_T::one_sided, "");
+    static_assert(fraction_value > MinusInfinityVal() / Denominator, "");
+    static_assert(fraction_value < PlusInfinityVal() / Denominator, "");
+    return Unit_T(fraction_value * Denominator);
+  }
+
+  template <
+      typename T,
+      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  static Unit_T FromValue(T value) {
+    if (Unit_T::one_sided)
+      RTC_DCHECK_GE(value, 0);
+    RTC_DCHECK_GT(value, MinusInfinityVal());
+    RTC_DCHECK_LT(value, PlusInfinityVal());
+    return Unit_T(rtc::dchecked_cast<int64_t>(value));
+  }
+  template <typename T,
+            typename std::enable_if<std::is_floating_point<T>::value>::type* =
+                nullptr>
+  static Unit_T FromValue(T value) {
+    if (value == std::numeric_limits<T>::infinity()) {
+      return PlusInfinity();
+    } else if (value == -std::numeric_limits<T>::infinity()) {
+      return MinusInfinity();
+    } else {
+      RTC_DCHECK(!std::isnan(value));
+      return FromValue(rtc::dchecked_cast<int64_t>(value));
+    }
+  }
+
+  template <
+      int64_t Denominator,
+      typename T,
+      typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+  static Unit_T FromFraction(T value) {
+    if (Unit_T::one_sided)
+      RTC_DCHECK_GE(value, 0);
+    RTC_DCHECK_GT(value, MinusInfinityVal() / Denominator);
+    RTC_DCHECK_LT(value, PlusInfinityVal() / Denominator);
+    return Unit_T(rtc::dchecked_cast<int64_t>(value * Denominator));
+  }
+  template <int64_t Denominator,
+            typename T,
+            typename std::enable_if<std::is_floating_point<T>::value>::type* =
+                nullptr>
+  static Unit_T FromFraction(T value) {
+    return FromValue(value * Denominator);
+  }
+
+  template <typename T = int64_t>
+  typename std::enable_if<std::is_integral<T>::value, T>::type ToValue() const {
+    RTC_DCHECK(IsFinite());
+    return rtc::dchecked_cast<T>(value_);
+  }
+  template <typename T>
+  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
+  ToValue() const {
+    return IsPlusInfinity()
+               ? std::numeric_limits<T>::infinity()
+               : IsMinusInfinity() ? -std::numeric_limits<T>::infinity()
+                                   : value_;
+  }
+  template <typename T>
+  constexpr T ToValueOr(T fallback_value) const {
+    return IsFinite() ? value_ : fallback_value;
+  }
+
+  template <int64_t Denominator, typename T = int64_t>
+  typename std::enable_if<std::is_integral<T>::value, T>::type ToFraction()
+      const {
+    RTC_DCHECK(IsFinite());
+    if (Unit_T::one_sided) {
+      return rtc::dchecked_cast<T>(
+          DivRoundPositiveToNearest(value_, Denominator));
+    } else {
+      return rtc::dchecked_cast<T>(DivRoundToNearest(value_, Denominator));
+    }
+  }
+  template <int64_t Denominator, typename T>
+  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
+  ToFraction() const {
+    return ToValue<T>() * (1 / static_cast<T>(Denominator));
+  }
+
+  template <int64_t Denominator>
+  constexpr int64_t ToFractionOr(int64_t fallback_value) const {
+    return IsFinite() ? Unit_T::one_sided
+                            ? DivRoundPositiveToNearest(value_, Denominator)
+                            : DivRoundToNearest(value_, Denominator)
+                      : fallback_value;
+  }
+
+  template <int64_t Factor, typename T = int64_t>
+  typename std::enable_if<std::is_integral<T>::value, T>::type ToMultiple()
+      const {
+    RTC_DCHECK_GE(ToValue(), std::numeric_limits<T>::min() / Factor);
+    RTC_DCHECK_LE(ToValue(), std::numeric_limits<T>::max() / Factor);
+    return rtc::dchecked_cast<T>(ToValue() * Factor);
+  }
+  template <int64_t Factor, typename T>
+  constexpr typename std::enable_if<std::is_floating_point<T>::value, T>::type
+  ToMultiple() const {
+    return ToValue<T>() * Factor;
+  }
+
+  explicit constexpr UnitBase(int64_t value) : value_(value) {}
+
+ private:
+  template <class RelativeUnit_T>
+  friend class RelativeUnit;
+
+  static inline constexpr int64_t PlusInfinityVal() {
+    return std::numeric_limits<int64_t>::max();
+  }
+  static inline constexpr int64_t MinusInfinityVal() {
+    return std::numeric_limits<int64_t>::min();
+  }
+
+  Unit_T& AsSubClassRef() { return reinterpret_cast<Unit_T&>(*this); }
+  constexpr const Unit_T& AsSubClassRef() const {
+    return reinterpret_cast<const Unit_T&>(*this);
+  }
+  // Assumes that n >= 0 and d > 0.
+  static constexpr int64_t DivRoundPositiveToNearest(int64_t n, int64_t d) {
+    return (n + d / 2) / d;
+  }
+  // Assumes that d > 0.
+  static constexpr int64_t DivRoundToNearest(int64_t n, int64_t d) {
+    return (n + (n >= 0 ? d / 2 : -d / 2)) / d;
+  }
+
+  int64_t value_;
+};
+
+// Extends UnitBase to provide operations for relative units, that is, units
+// that have a meaningful relation between values such that a += b is a
+// sensible thing to do, for a and b of the same unit.
+template <class Unit_T>
+class RelativeUnit : public UnitBase<Unit_T> {
+ public:
+  Unit_T Clamped(Unit_T min_value, Unit_T max_value) const {
+    return std::max(min_value,
+                    std::min(UnitBase<Unit_T>::AsSubClassRef(), max_value));
+  }
+  void Clamp(Unit_T min_value, Unit_T max_value) {
+    *this = Clamped(min_value, max_value);
+  }
+  Unit_T operator+(const Unit_T other) const {
+    if (this->IsPlusInfinity() || other.IsPlusInfinity()) {
+      RTC_DCHECK(!this->IsMinusInfinity());
+      RTC_DCHECK(!other.IsMinusInfinity());
+      return this->PlusInfinity();
+    } else if (this->IsMinusInfinity() || other.IsMinusInfinity()) {
+      RTC_DCHECK(!this->IsPlusInfinity());
+      RTC_DCHECK(!other.IsPlusInfinity());
+      return this->MinusInfinity();
+    }
+    return UnitBase<Unit_T>::FromValue(this->ToValue() + other.ToValue());
+  }
+  Unit_T operator-(const Unit_T other) const {
+    if (this->IsPlusInfinity() || other.IsMinusInfinity()) {
+      RTC_DCHECK(!this->IsMinusInfinity());
+      RTC_DCHECK(!other.IsPlusInfinity());
+      return this->PlusInfinity();
+    } else if (this->IsMinusInfinity() || other.IsPlusInfinity()) {
+      RTC_DCHECK(!this->IsPlusInfinity());
+      RTC_DCHECK(!other.IsMinusInfinity());
+      return this->MinusInfinity();
+    }
+    return UnitBase<Unit_T>::FromValue(this->ToValue() - other.ToValue());
+  }
+  Unit_T& operator+=(const Unit_T other) {
+    *this = *this + other;
+    return this->AsSubClassRef();
+  }
+  Unit_T& operator-=(const Unit_T other) {
+    *this = *this - other;
+    return this->AsSubClassRef();
+  }
+  constexpr double operator/(const Unit_T other) const {
+    return UnitBase<Unit_T>::template ToValue<double>() /
+           other.template ToValue<double>();
+  }
+  template <typename T>
+  typename std::enable_if<std::is_arithmetic<T>::value, Unit_T>::type operator/(
+      const T& scalar) const {
+    return UnitBase<Unit_T>::FromValue(
+        std::round(UnitBase<Unit_T>::template ToValue<int64_t>() / scalar));
+  }
+  Unit_T operator*(const double scalar) const {
+    return UnitBase<Unit_T>::FromValue(std::round(this->ToValue() * scalar));
+  }
+  Unit_T operator*(const int64_t scalar) const {
+    return UnitBase<Unit_T>::FromValue(this->ToValue() * scalar);
+  }
+  Unit_T operator*(const int32_t scalar) const {
+    return UnitBase<Unit_T>::FromValue(this->ToValue() * scalar);
+  }
+
+ protected:
+  using UnitBase<Unit_T>::UnitBase;
+};
+
+template <class Unit_T>
+inline Unit_T operator*(const double scalar, const RelativeUnit<Unit_T> other) {
+  return other * scalar;
+}
+template <class Unit_T>
+inline Unit_T operator*(const int64_t scalar,
+                        const RelativeUnit<Unit_T> other) {
+  return other * scalar;
+}
+template <class Unit_T>
+inline Unit_T operator*(const int32_t& scalar,
+                        const RelativeUnit<Unit_T> other) {
+  return other * scalar;
+}
+
+}  // namespace rtc_units_impl
+
+}  // namespace webrtc
+
+#endif  // RTC_BASE_UNITS_UNIT_BASE_H_
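
The header comment above shows the intended usage pattern (class MyUnit : public UnitBase<MyUnit>). A minimal sketch of such a subclass (illustrative only: the class name and scale are invented here; the real unit types live under api/units, and the test file below defines a very similar TestUnit):

#include "rtc_base/units/unit_base.h"

namespace webrtc {
// Illustrative unit: a non-negative byte count stored in the int64_t value.
class ByteCount final : public rtc_units_impl::RelativeUnit<ByteCount> {
 public:
  ByteCount() = delete;
  template <typename T>
  static ByteCount Bytes(T value) {
    return FromValue(value);  // DCHECKs against negatives and the sentinels.
  }
  template <typename T = int64_t>
  T bytes() const {
    return ToValue<T>();
  }

 private:
  friend class UnitBase<ByteCount>;  // Needs the constructor and |one_sided|.
  static constexpr bool one_sided = true;
  using RelativeUnit<ByteCount>::RelativeUnit;
};
}  // namespace webrtc

With that in place, expressions like ByteCount::Bytes(1500) + ByteCount::Bytes(500) and the comparison operators come for free from RelativeUnit and UnitBase.
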
diff --git a/rtc_base/units/unit_base_unittest.cc b/rtc_base/units/unit_base_unittest.cc
new file mode 100644
index 0000000..f8c8503
--- /dev/null
+++ b/rtc_base/units/unit_base_unittest.cc
@@ -0,0 +1,234 @@
+/*
+ *  Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "rtc_base/units/unit_base.h"
+
+#include "test/gtest.h"
+
+namespace webrtc {
+namespace {
+class TestUnit final : public rtc_units_impl::RelativeUnit<TestUnit> {
+ public:
+  TestUnit() = delete;
+
+  using UnitBase::FromStaticValue;
+  using UnitBase::FromValue;
+  using UnitBase::ToValue;
+  using UnitBase::ToValueOr;
+
+  template <int64_t kilo>
+  static constexpr TestUnit FromStaticKilo() {
+    return FromStaticFraction<kilo, 1000>();
+  }
+  template <typename T>
+  static TestUnit FromKilo(T kilo) {
+    return FromFraction<1000>(kilo);
+  }
+  template <typename T = int64_t>
+  T ToKilo() const {
+    return UnitBase::ToFraction<1000, T>();
+  }
+  constexpr int64_t ToKiloOr(int64_t fallback) const {
+    return UnitBase::ToFractionOr<1000>(fallback);
+  }
+  template <typename T>
+  constexpr T ToMilli() const {
+    return UnitBase::ToMultiple<1000, T>();
+  }
+
+ private:
+  friend class UnitBase<TestUnit>;
+  static constexpr bool one_sided = false;
+  using RelativeUnit<TestUnit>::RelativeUnit;
+};
+}  // namespace
+namespace test {
+TEST(UnitBaseTest, ConstExpr) {
+  constexpr int64_t kValue = -12345;
+  constexpr TestUnit kTestUnitZero = TestUnit::Zero();
+  constexpr TestUnit kTestUnitPlusInf = TestUnit::PlusInfinity();
+  constexpr TestUnit kTestUnitMinusInf = TestUnit::MinusInfinity();
+  static_assert(kTestUnitZero.IsZero(), "");
+  static_assert(kTestUnitPlusInf.IsPlusInfinity(), "");
+  static_assert(kTestUnitMinusInf.IsMinusInfinity(), "");
+  static_assert(kTestUnitPlusInf.ToKiloOr(-1) == -1, "");
+
+  static_assert(kTestUnitPlusInf > kTestUnitZero, "");
+
+  constexpr TestUnit kTestUnitKilo = TestUnit::FromStaticKilo<kValue>();
+  constexpr TestUnit kTestUnitValue = TestUnit::FromStaticValue<kValue>();
+
+  static_assert(kTestUnitKilo.ToKiloOr(0) == kValue, "");
+  static_assert(kTestUnitValue.ToValueOr(0) == kValue, "");
+}
+
+TEST(UnitBaseTest, GetBackSameValues) {
+  const int64_t kValue = 499;
+  for (int sign = -1; sign <= 1; ++sign) {
+    int64_t value = kValue * sign;
+    EXPECT_EQ(TestUnit::FromKilo(value).ToKilo(), value);
+    EXPECT_EQ(TestUnit::FromValue(value).ToValue<int64_t>(), value);
+  }
+  EXPECT_EQ(TestUnit::Zero().ToValue<int64_t>(), 0);
+}
+
+TEST(UnitBaseTest, GetDifferentPrefix) {
+  const int64_t kValue = 3000000;
+  EXPECT_EQ(TestUnit::FromValue(kValue).ToKilo(), kValue / 1000);
+  EXPECT_EQ(TestUnit::FromKilo(kValue).ToValue<int64_t>(), kValue * 1000);
+}
+
+TEST(UnitBaseTest, IdentityChecks) {
+  const int64_t kValue = 3000;
+  EXPECT_TRUE(TestUnit::Zero().IsZero());
+  EXPECT_FALSE(TestUnit::FromKilo(kValue).IsZero());
+
+  EXPECT_TRUE(TestUnit::PlusInfinity().IsInfinite());
+  EXPECT_TRUE(TestUnit::MinusInfinity().IsInfinite());
+  EXPECT_FALSE(TestUnit::Zero().IsInfinite());
+  EXPECT_FALSE(TestUnit::FromKilo(-kValue).IsInfinite());
+  EXPECT_FALSE(TestUnit::FromKilo(kValue).IsInfinite());
+
+  EXPECT_FALSE(TestUnit::PlusInfinity().IsFinite());
+  EXPECT_FALSE(TestUnit::MinusInfinity().IsFinite());
+  EXPECT_TRUE(TestUnit::FromKilo(-kValue).IsFinite());
+  EXPECT_TRUE(TestUnit::FromKilo(kValue).IsFinite());
+  EXPECT_TRUE(TestUnit::Zero().IsFinite());
+
+  EXPECT_TRUE(TestUnit::PlusInfinity().IsPlusInfinity());
+  EXPECT_FALSE(TestUnit::MinusInfinity().IsPlusInfinity());
+
+  EXPECT_TRUE(TestUnit::MinusInfinity().IsMinusInfinity());
+  EXPECT_FALSE(TestUnit::PlusInfinity().IsMinusInfinity());
+}
+
+TEST(UnitBaseTest, ComparisonOperators) {
+  const int64_t kSmall = 450;
+  const int64_t kLarge = 451;
+  const TestUnit small = TestUnit::FromKilo(kSmall);
+  const TestUnit large = TestUnit::FromKilo(kLarge);
+
+  EXPECT_EQ(TestUnit::Zero(), TestUnit::FromKilo(0));
+  EXPECT_EQ(TestUnit::PlusInfinity(), TestUnit::PlusInfinity());
+  EXPECT_EQ(small, TestUnit::FromKilo(kSmall));
+  EXPECT_LE(small, TestUnit::FromKilo(kSmall));
+  EXPECT_GE(small, TestUnit::FromKilo(kSmall));
+  EXPECT_NE(small, TestUnit::FromKilo(kLarge));
+  EXPECT_LE(small, TestUnit::FromKilo(kLarge));
+  EXPECT_LT(small, TestUnit::FromKilo(kLarge));
+  EXPECT_GE(large, TestUnit::FromKilo(kSmall));
+  EXPECT_GT(large, TestUnit::FromKilo(kSmall));
+  EXPECT_LT(TestUnit::Zero(), small);
+  EXPECT_GT(TestUnit::Zero(), TestUnit::FromKilo(-kSmall));
+  EXPECT_GT(TestUnit::Zero(), TestUnit::FromKilo(-kSmall));
+
+  EXPECT_GT(TestUnit::PlusInfinity(), large);
+  EXPECT_LT(TestUnit::MinusInfinity(), TestUnit::Zero());
+}
+
+TEST(UnitBaseTest, Clamping) {
+  const TestUnit upper = TestUnit::FromKilo(800);
+  const TestUnit lower = TestUnit::FromKilo(100);
+  const TestUnit under = TestUnit::FromKilo(100);
+  const TestUnit inside = TestUnit::FromKilo(500);
+  const TestUnit over = TestUnit::FromKilo(1000);
+  EXPECT_EQ(under.Clamped(lower, upper), lower);
+  EXPECT_EQ(inside.Clamped(lower, upper), inside);
+  EXPECT_EQ(over.Clamped(lower, upper), upper);
+
+  TestUnit mutable_delta = lower;
+  mutable_delta.Clamp(lower, upper);
+  EXPECT_EQ(mutable_delta, lower);
+  mutable_delta = inside;
+  mutable_delta.Clamp(lower, upper);
+  EXPECT_EQ(mutable_delta, inside);
+  mutable_delta = over;
+  mutable_delta.Clamp(lower, upper);
+  EXPECT_EQ(mutable_delta, upper);
+}
+
+TEST(UnitBaseTest, CanBeInititializedFromLargeInt) {
+  const int kMaxInt = std::numeric_limits<int>::max();
+  EXPECT_EQ(TestUnit::FromKilo(kMaxInt).ToValue<int64_t>(),
+            static_cast<int64_t>(kMaxInt) * 1000);
+}
+
+TEST(UnitBaseTest, ConvertsToAndFromDouble) {
+  const int64_t kValue = 17017;
+  const double kMilliDouble = kValue * 1e3;
+  const double kValueDouble = kValue;
+  const double kKiloDouble = kValue * 1e-3;
+
+  EXPECT_EQ(TestUnit::FromValue(kValue).ToKilo<double>(), kKiloDouble);
+  EXPECT_EQ(TestUnit::FromKilo(kKiloDouble).ToValue<int64_t>(), kValue);
+
+  EXPECT_EQ(TestUnit::FromValue(kValue).ToValue<double>(), kValueDouble);
+  EXPECT_EQ(TestUnit::FromValue(kValueDouble).ToValue<int64_t>(), kValue);
+
+  EXPECT_NEAR(TestUnit::FromValue(kValue).ToMilli<double>(), kMilliDouble, 1);
+
+  const double kPlusInfinity = std::numeric_limits<double>::infinity();
+  const double kMinusInfinity = -kPlusInfinity;
+
+  EXPECT_EQ(TestUnit::PlusInfinity().ToKilo<double>(), kPlusInfinity);
+  EXPECT_EQ(TestUnit::MinusInfinity().ToKilo<double>(), kMinusInfinity);
+  EXPECT_EQ(TestUnit::PlusInfinity().ToValue<double>(), kPlusInfinity);
+  EXPECT_EQ(TestUnit::MinusInfinity().ToValue<double>(), kMinusInfinity);
+  EXPECT_EQ(TestUnit::PlusInfinity().ToMilli<double>(), kPlusInfinity);
+  EXPECT_EQ(TestUnit::MinusInfinity().ToMilli<double>(), kMinusInfinity);
+
+  EXPECT_TRUE(TestUnit::FromKilo(kPlusInfinity).IsPlusInfinity());
+  EXPECT_TRUE(TestUnit::FromKilo(kMinusInfinity).IsMinusInfinity());
+  EXPECT_TRUE(TestUnit::FromValue(kPlusInfinity).IsPlusInfinity());
+  EXPECT_TRUE(TestUnit::FromValue(kMinusInfinity).IsMinusInfinity());
+}
+
+TEST(UnitBaseTest, MathOperations) {
+  const int64_t kValueA = 267;
+  const int64_t kValueB = 450;
+  const TestUnit delta_a = TestUnit::FromKilo(kValueA);
+  const TestUnit delta_b = TestUnit::FromKilo(kValueB);
+  EXPECT_EQ((delta_a + delta_b).ToKilo(), kValueA + kValueB);
+  EXPECT_EQ((delta_a - delta_b).ToKilo(), kValueA - kValueB);
+
+  const int32_t kInt32Value = 123;
+  const double kFloatValue = 123.0;
+  EXPECT_EQ((TestUnit::FromValue(kValueA) * kValueB).ToValue<int64_t>(),
+            kValueA * kValueB);
+  EXPECT_EQ((TestUnit::FromValue(kValueA) * kInt32Value).ToValue<int64_t>(),
+            kValueA * kInt32Value);
+  EXPECT_EQ((TestUnit::FromValue(kValueA) * kFloatValue).ToValue<int64_t>(),
+            kValueA * kFloatValue);
+
+  EXPECT_EQ((delta_b / 10).ToKilo(), kValueB / 10);
+  EXPECT_EQ(delta_b / delta_a, static_cast<double>(kValueB) / kValueA);
+
+  TestUnit mutable_delta = TestUnit::FromKilo(kValueA);
+  mutable_delta += TestUnit::FromKilo(kValueB);
+  EXPECT_EQ(mutable_delta, TestUnit::FromKilo(kValueA + kValueB));
+  mutable_delta -= TestUnit::FromKilo(kValueB);
+  EXPECT_EQ(mutable_delta, TestUnit::FromKilo(kValueA));
+}
+
+TEST(UnitBaseTest, InfinityOperations) {
+  const int64_t kValue = 267;
+  const TestUnit finite = TestUnit::FromKilo(kValue);
+  EXPECT_TRUE((TestUnit::PlusInfinity() + finite).IsPlusInfinity());
+  EXPECT_TRUE((TestUnit::PlusInfinity() - finite).IsPlusInfinity());
+  EXPECT_TRUE((finite + TestUnit::PlusInfinity()).IsPlusInfinity());
+  EXPECT_TRUE((finite - TestUnit::MinusInfinity()).IsPlusInfinity());
+
+  EXPECT_TRUE((TestUnit::MinusInfinity() + finite).IsMinusInfinity());
+  EXPECT_TRUE((TestUnit::MinusInfinity() - finite).IsMinusInfinity());
+  EXPECT_TRUE((finite + TestUnit::MinusInfinity()).IsMinusInfinity());
+  EXPECT_TRUE((finite - TestUnit::PlusInfinity()).IsMinusInfinity());
+}
+}  // namespace test
+}  // namespace webrtc
diff --git a/rtc_base/win/windows_version.cc b/rtc_base/win/windows_version.cc
index f10e42c..65ef4fd 100644
--- a/rtc_base/win/windows_version.cc
+++ b/rtc_base/win/windows_version.cc
@@ -28,6 +28,8 @@
 #error Creators Update SDK (10.0.15063.468) required.
 #endif
 
+#if !defined(WINUWP)
+
 namespace {
 
 typedef BOOL(WINAPI* GetProductInfoPtr)(DWORD, DWORD, DWORD, DWORD, PDWORD);
@@ -171,6 +173,8 @@
 
 }  // namespace
 
+#endif  // !defined(WINUWP)
+
 namespace rtc {
 namespace rtc_win {
 namespace {
@@ -221,6 +225,10 @@
 // this undocumented value appears to be similar to a patch number.
 // Returns 0 if the value does not exist or it could not be read.
 int GetUBR() {
+#if defined(WINUWP)
+  // The registry is not accessible for WinUWP sandboxed store applications.
+  return 0;
+#else
   // The values under the CurrentVersion registry hive are mirrored under
   // the corresponding Wow6432 hive.
   static constexpr wchar_t kRegKeyWindowsNTCurrentVersion[] =
@@ -236,6 +244,7 @@
   key.ReadValueDW(L"UBR", &ubr);
 
   return static_cast<int>(ubr);
+#endif  // defined(WINUWP)
 }
 
 }  // namespace
@@ -294,6 +303,7 @@
   processors_ = system_info.dwNumberOfProcessors;
   allocation_granularity_ = system_info.dwAllocationGranularity;
 
+#if !defined(WINUWP)
   GetProductInfoPtr get_product_info;
   DWORD os_type;
 
@@ -366,11 +376,21 @@
     // Windows is pre XP so we don't care but pick a safe default.
     version_type_ = SUITE_HOME;
   }
+#else
+  // WinUWP sandboxed store apps do not have a mechanism to determine
+  // the product suite, so the most restricted suite is chosen.
+  version_type_ = SUITE_HOME;
+#endif  // !defined(WINUWP)
 }
 
 OSInfo::~OSInfo() {}
 
 std::string OSInfo::processor_model_name() {
+#if defined(WINUWP)
+  // WinUWP sandboxed store apps do not have the ability to
+  // probe the name of the current processor.
+  return "Unknown Processor (UWP)";
+#else
   if (processor_model_name_.empty()) {
     const wchar_t kProcessorNameString[] =
         L"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0";
@@ -380,18 +400,24 @@
     processor_model_name_ = rtc::ToUtf8(value);
   }
   return processor_model_name_;
+#endif  // defined(WINUWP)
 }
 
 // static
 OSInfo::WOW64Status OSInfo::GetWOW64StatusForProcess(HANDLE process_handle) {
+  BOOL is_wow64;
+#if defined(WINUWP)
+  if (!IsWow64Process(process_handle, &is_wow64))
+    return WOW64_UNKNOWN;
+#else
   typedef BOOL(WINAPI * IsWow64ProcessFunc)(HANDLE, PBOOL);
   IsWow64ProcessFunc is_wow64_process = reinterpret_cast<IsWow64ProcessFunc>(
       GetProcAddress(GetModuleHandle(L"kernel32.dll"), "IsWow64Process"));
   if (!is_wow64_process)
     return WOW64_DISABLED;
-  BOOL is_wow64 = FALSE;
   if (!(*is_wow64_process)(process_handle, &is_wow64))
     return WOW64_UNKNOWN;
+#endif  // defined(WINUWP)
   return is_wow64 ? WOW64_ENABLED : WOW64_DISABLED;
 }
 
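
A hedged usage sketch of the WOW64 query touched above (the header path and the public OSInfo API are assumed to mirror the Chromium original this file is ported from):

#include <windows.h>

#include "rtc_base/win/windows_version.h"

// Returns true if the current (32-bit) process runs under WOW64. On WinUWP the
// status is obtained by calling IsWow64Process() directly, since it is always
// available on Windows 10; on desktop Windows it is still resolved dynamically
// via GetProcAddress.
bool RunningUnderWow64() {
  using rtc::rtc_win::OSInfo;
  return OSInfo::GetWOW64StatusForProcess(::GetCurrentProcess()) ==
         OSInfo::WOW64_ENABLED;
}
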
diff --git a/rtc_base/win32.cc b/rtc_base/win32.cc
index e3482e3..480d97b 100644
--- a/rtc_base/win32.cc
+++ b/rtc_base/win32.cc
@@ -331,6 +331,11 @@
   }
   // Replace forward slashes with backslashes
   std::replace(wfilename, wfilename + wlen, L'/', L'\\');
+#if defined(WINUWP)
+  // WinUWP sandboxed store applications require the paths to remain as
+  // relative paths.
+  filename->assign(wfilename);
+#else
   // Convert to complete filename
   DWORD full_len = ::GetFullPathName(wfilename, 0, nullptr, nullptr);
   if (0 == full_len) {
@@ -360,9 +365,18 @@
     // Already in long-path form.
   }
   filename->assign(start);
+#endif  // defined(WINUWP)
+
   return true;
 }
 
+// Windows UWP applications intentionally cannot obtain OS versioning
+// information from the sandbox (behaviour based on OS version rather
+// than feature discovery / compilation flags is discouraged, and
+// Windows 10 is a continuously updated OS, unlike previous versions
+// of Windows).
+#if !defined(WINUWP)
+
 bool GetOsVersion(int* major, int* minor, int* build) {
   OSVERSIONINFO info = {0};
   info.dwOSVersionInfoSize = sizeof(info);
@@ -399,4 +413,6 @@
   return ret;
 }
 
+#endif  // !defined(WINUWP)
+
 }  // namespace rtc
diff --git a/rtc_base/win32.h b/rtc_base/win32.h
index a5fd541..004c6b4 100644
--- a/rtc_base/win32.h
+++ b/rtc_base/win32.h
@@ -54,6 +54,8 @@
   kWindowsVista = 6,
   kWindows10 = 10,
 };
+
+#if !defined(WINUWP)
 bool GetOsVersion(int* major, int* minor, int* build);
 
 inline bool IsWindowsVistaOrLater() {
@@ -87,6 +89,34 @@
           level < SECURITY_MANDATORY_MEDIUM_RID);
 }
 
+#else
+
+// When targeting WinUWP the OS must be Windows 10 (or greater) as lesser
+// Windows OS targets are not supported.
+inline bool IsWindowsVistaOrLater() {
+  return true;
+}
+
+inline bool IsWindowsXpOrLater() {
+  return true;
+}
+
+inline bool IsWindows8OrLater() {
+  return true;
+}
+
+inline bool IsWindows10OrLater() {
+  return true;
+}
+
+inline bool IsCurrentProcessLowIntegrity() {
+  // For WinUWP sandboxed store apps, assume this is NOT a low integrity level
+  // run, as application privileges can be requested in the manifest.
+  return false;
+}
+
+#endif  // !defined(WINUWP)
+
 }  // namespace rtc
 
 #endif  // RTC_BASE_WIN32_H_
diff --git a/system_wrappers/BUILD.gn b/system_wrappers/BUILD.gn
index 143a347..da83d12 100644
--- a/system_wrappers/BUILD.gn
+++ b/system_wrappers/BUILD.gn
@@ -33,7 +33,7 @@
   libs = []
   deps = [
     ":cpu_features_api",
-    "..:webrtc_common",
+    "../api:array_view",
     "../modules:module_api_public",
     "../rtc_base:checks",
     "../rtc_base/synchronization:rw_lock_wrapper",
@@ -89,9 +89,6 @@
   sources = [
     "include/cpu_features_wrapper.h",
   ]
-  deps = [
-    "..:webrtc_common",
-  ]
 }
 
 rtc_source_set("field_trial") {
diff --git a/system_wrappers/include/metrics.h b/system_wrappers/include/metrics.h
index 2a2cda0..f00ecf2 100644
--- a/system_wrappers/include/metrics.h
+++ b/system_wrappers/include/metrics.h
@@ -15,7 +15,6 @@
 #include <memory>
 #include <string>
 
-#include "common_types.h"  // NOLINT(build/include)
 #include "rtc_base/atomicops.h"
 #include "rtc_base/checks.h"
 
diff --git a/system_wrappers/include/rtp_to_ntp_estimator.h b/system_wrappers/include/rtp_to_ntp_estimator.h
index 51da4d2..c244c4f 100644
--- a/system_wrappers/include/rtp_to_ntp_estimator.h
+++ b/system_wrappers/include/rtp_to_ntp_estimator.h
@@ -42,23 +42,13 @@
 
   // Estimated parameters from RTP and NTP timestamp pairs in |measurements_|.
   struct Parameters {
-    // Implicit conversion from int because MovingMedianFilter returns 0
-    // internally if no samples are present. However, it should never happen as
-    // we don't ask smoothing_filter_ to return anything if there were no
-    // samples.
-    Parameters(const int& value) {  // NOLINT
-      RTC_NOTREACHED();
-    }
     Parameters() : frequency_khz(0.0), offset_ms(0.0) {}
 
+    Parameters(double frequency_khz, double offset_ms)
+        : frequency_khz(frequency_khz), offset_ms(offset_ms) {}
+
     double frequency_khz;
     double offset_ms;
-
-    // Needed to make it work inside MovingMedianFilter
-    bool operator<(const Parameters& other) const;
-    bool operator==(const Parameters& other) const;
-    bool operator<=(const Parameters& other) const;
-    bool operator!=(const Parameters& other) const;
   };
 
   // Updates measurements with RTP/NTP timestamp pair from a RTCP sender report.
@@ -70,7 +60,7 @@
 
   // Converts an RTP timestamp to the NTP domain in milliseconds.
   // Returns true on success, false otherwise.
-  bool Estimate(int64_t rtp_timestamp, int64_t* rtp_timestamp_ms) const;
+  bool Estimate(int64_t rtp_timestamp, int64_t* ntp_timestamp_ms) const;
 
   // Returns estimated rtp to ntp linear transform parameters.
   const absl::optional<Parameters> params() const;
@@ -82,8 +72,7 @@
 
   int consecutive_invalid_samples_;
   std::list<RtcpMeasurement> measurements_;
-  MovingMedianFilter<Parameters> smoothing_filter_;
-  bool params_calculated_;
+  absl::optional<Parameters> params_;
   mutable TimestampUnwrapper unwrapper_;
 };
 }  // namespace webrtc
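
A short sketch of how the simplified estimator API above is driven (ToSenderNtpMs and its arguments are illustrative placeholders; the call pattern mirrors the unit tests further down):

#include <cstdint>

#include "system_wrappers/include/rtp_to_ntp_estimator.h"

// Feed each received RTCP sender report into the estimator, then map media
// RTP timestamps onto the sender's NTP clock in milliseconds.
int64_t ToSenderNtpMs(webrtc::RtpToNtpEstimator* estimator,
                      uint32_t ntp_secs,
                      uint32_t ntp_frac,
                      uint32_t sr_rtp_timestamp,
                      int64_t packet_rtp_timestamp) {
  bool new_sr = false;
  estimator->UpdateMeasurements(ntp_secs, ntp_frac, sr_rtp_timestamp, &new_sr);
  int64_t ntp_ms = -1;
  // Estimate() fails until at least two valid sender reports have been seen.
  estimator->Estimate(packet_rtp_timestamp, &ntp_ms);
  return ntp_ms;
}
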
diff --git a/system_wrappers/source/clock.cc b/system_wrappers/source/clock.cc
index 4f5d9cf..32cf1de 100644
--- a/system_wrappers/source/clock.cc
+++ b/system_wrappers/source/clock.cc
@@ -80,7 +80,29 @@
   }
 };
 
-#if defined(WEBRTC_WIN)
+#if defined(WINUWP)
+class WinUwpRealTimeClock final : public RealTimeClock {
+ public:
+  WinUwpRealTimeClock() = default;
+  ~WinUwpRealTimeClock() override {}
+
+ protected:
+  timeval CurrentTimeVal() const override {
+    // The rtc::SystemTimeNanos() method is already offset from a base epoch
+    // value and, as an added bonus, may already be synchronized against an
+    // NTP time server (see SyncWithNtp()).
+    auto nanos = rtc::SystemTimeNanos();
+
+    struct timeval tv;
+
+    tv.tv_sec = rtc::dchecked_cast<long>(nanos / 1000000000);
+    tv.tv_usec = rtc::dchecked_cast<long>((nanos % 1000000000) / 1000);
+
+    return tv;
+  }
+};
+
+#elif defined(WEBRTC_WIN)
 // TODO(pbos): Consider modifying the implementation to synchronize itself
 // against system time (update ref_point_, make it non-const) periodically to
 // prevent clock drift.
@@ -202,7 +224,9 @@
 #endif  // defined(WEBRTC_POSIX)
 
 Clock* Clock::GetRealTimeClock() {
-#if defined(WEBRTC_WIN)
+#if defined(WINUWP)
+  static Clock* const clock = new WinUwpRealTimeClock();
+#elif defined(WEBRTC_WIN)
   static Clock* const clock = new WindowsRealTimeClock();
 #elif defined(WEBRTC_POSIX)
   static Clock* const clock = new UnixRealTimeClock();
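
The WinUwpRealTimeClock conversion splits rtc::SystemTimeNanos() into whole seconds and the sub-second microsecond remainder; a quick worked check of that arithmetic:

// For nanos = 1'234'567'890:
//   seconds      = 1'234'567'890 / 1'000'000'000           = 1
//   microseconds = (1'234'567'890 % 1'000'000'000) / 1'000 = 234'567
static_assert(1234567890LL / 1000000000 == 1, "");
static_assert((1234567890LL % 1000000000) / 1000 == 234567, "");
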
diff --git a/system_wrappers/source/rtp_to_ntp_estimator.cc b/system_wrappers/source/rtp_to_ntp_estimator.cc
index aaef4b1..4bbf609 100644
--- a/system_wrappers/source/rtp_to_ntp_estimator.cc
+++ b/system_wrappers/source/rtp_to_ntp_estimator.cc
@@ -11,30 +11,22 @@
 #include "system_wrappers/include/rtp_to_ntp_estimator.h"
 
 #include <stddef.h>
+#include <cmath>
+#include <vector>
 
+#include "api/array_view.h"
 #include "rtc_base/checks.h"
 #include "rtc_base/logging.h"
 
 namespace webrtc {
 namespace {
-// Number of RTCP SR reports to use to map between RTP and NTP.
-const size_t kNumRtcpReportsToUse = 2;
-// Number of parameters samples used to smooth.
-const size_t kNumSamplesToSmooth = 20;
-
-// Calculates the RTP timestamp frequency from two pairs of NTP/RTP timestamps.
-bool CalculateFrequency(int64_t ntp_ms1,
-                        uint32_t rtp_timestamp1,
-                        int64_t ntp_ms2,
-                        uint32_t rtp_timestamp2,
-                        double* frequency_khz) {
-  if (ntp_ms1 <= ntp_ms2)
-    return false;
-
-  *frequency_khz = static_cast<double>(rtp_timestamp1 - rtp_timestamp2) /
-                   static_cast<double>(ntp_ms1 - ntp_ms2);
-  return true;
-}
+// Maximum number of RTCP SR reports to use to map between RTP and NTP.
+const size_t kNumRtcpReportsToUse = 20;
+// Don't allow NTP timestamps to jump more than 1 hour. Chosen arbitrarily as
+// big enough not to affect normal use-cases. It is still smaller than the RTP
+// wrap-around half-period (a 90 kHz RTP clock wraps around every 13.25 hours);
+// after half a wrap-around period, RTP timestamps cannot be unwrapped correctly.
+const int kMaxAllowedRtcpNtpIntervalMs = 60 * 60 * 1000;
 
 bool Contains(const std::list<RtpToNtpEstimator::RtcpMeasurement>& measurements,
               const RtpToNtpEstimator::RtcpMeasurement& other) {
@@ -44,29 +36,47 @@
   }
   return false;
 }
-}  // namespace
 
-bool RtpToNtpEstimator::Parameters::operator<(const Parameters& other) const {
-  if (frequency_khz < other.frequency_khz - 1e-6) {
-    return true;
-  } else if (frequency_khz > other.frequency_khz + 1e-6) {
+// Given x[] and y[], computes k and b such that the line y = k*x + b best
+// approximates the given points (least-squares fit).
+bool LinearRegression(rtc::ArrayView<const double> x,
+                      rtc::ArrayView<const double> y,
+                      double* k,
+                      double* b) {
+  size_t n = x.size();
+  if (n < 2)
     return false;
-  } else {
-    return offset_ms < other.offset_ms - 1e-6;
+
+  if (y.size() != n)
+    return false;
+
+  double avg_x = 0;
+  double avg_y = 0;
+  for (size_t i = 0; i < n; ++i) {
+    avg_x += x[i];
+    avg_y += y[i];
   }
+  avg_x /= n;
+  avg_y /= n;
+
+  double variance_x = 0;
+  double covariance_xy = 0;
+  for (size_t i = 0; i < n; ++i) {
+    double normalized_x = x[i] - avg_x;
+    double normalized_y = y[i] - avg_y;
+    variance_x += normalized_x * normalized_x;
+    covariance_xy += normalized_x * normalized_y;
+  }
+
+  if (std::fabs(variance_x) < 1e-8)
+    return false;
+
+  *k = static_cast<double>(covariance_xy / variance_x);
+  *b = static_cast<double>(avg_y - (*k) * avg_x);
+  return true;
 }
 
-bool RtpToNtpEstimator::Parameters::operator==(const Parameters& other) const {
-  return !(other < *this || *this < other);
-}
-
-bool RtpToNtpEstimator::Parameters::operator!=(const Parameters& other) const {
-  return other < *this || *this < other;
-}
-
-bool RtpToNtpEstimator::Parameters::operator<=(const Parameters& other) const {
-  return !(other < *this);
-}
+}  // namespace
 
 RtpToNtpEstimator::RtcpMeasurement::RtcpMeasurement(uint32_t ntp_secs,
                                                     uint32_t ntp_frac,
@@ -83,31 +93,29 @@
 }
 
 // Class for converting an RTP timestamp to the NTP domain.
-RtpToNtpEstimator::RtpToNtpEstimator()
-    : consecutive_invalid_samples_(0),
-      smoothing_filter_(kNumSamplesToSmooth),
-      params_calculated_(false) {}
+RtpToNtpEstimator::RtpToNtpEstimator() : consecutive_invalid_samples_(0) {}
 
 RtpToNtpEstimator::~RtpToNtpEstimator() {}
 
 void RtpToNtpEstimator::UpdateParameters() {
-  if (measurements_.size() != kNumRtcpReportsToUse)
+  if (measurements_.size() < 2)
     return;
 
-  Parameters params;
-  int64_t timestamp_new = measurements_.front().unwrapped_rtp_timestamp;
-  int64_t timestamp_old = measurements_.back().unwrapped_rtp_timestamp;
+  std::vector<double> x;
+  std::vector<double> y;
+  x.reserve(measurements_.size());
+  y.reserve(measurements_.size());
+  for (auto it = measurements_.begin(); it != measurements_.end(); ++it) {
+    x.push_back(it->unwrapped_rtp_timestamp);
+    y.push_back(it->ntp_time.ToMs());
+  }
+  double slope, offset;
 
-  int64_t ntp_ms_new = measurements_.front().ntp_time.ToMs();
-  int64_t ntp_ms_old = measurements_.back().ntp_time.ToMs();
-
-  if (!CalculateFrequency(ntp_ms_new, timestamp_new, ntp_ms_old, timestamp_old,
-                          &params.frequency_khz)) {
+  if (!LinearRegression(x, y, &slope, &offset)) {
     return;
   }
-  params.offset_ms = timestamp_new - params.frequency_khz * ntp_ms_new;
-  params_calculated_ = true;
-  smoothing_filter_.Insert(params);
+
+  params_.emplace(1 / slope, offset);
 }
 
 bool RtpToNtpEstimator::UpdateMeasurements(uint32_t ntp_secs,
@@ -133,7 +141,8 @@
   if (!measurements_.empty()) {
     int64_t old_rtp_timestamp = measurements_.front().unwrapped_rtp_timestamp;
     int64_t old_ntp_ms = measurements_.front().ntp_time.ToMs();
-    if (ntp_ms_new <= old_ntp_ms) {
+    if (ntp_ms_new <= old_ntp_ms ||
+        ntp_ms_new > old_ntp_ms + kMaxAllowedRtcpNtpIntervalMs) {
       invalid_sample = true;
     } else if (unwrapped_rtp_timestamp <= old_rtp_timestamp) {
       RTC_LOG(LS_WARNING)
@@ -153,8 +162,7 @@
     RTC_LOG(LS_WARNING) << "Multiple consecutively invalid RTCP SR reports, "
                            "clearing measurements.";
     measurements_.clear();
-    smoothing_filter_.Reset();
-    params_calculated_ = false;
+    params_ = absl::nullopt;
   }
   consecutive_invalid_samples_ = 0;
 
@@ -171,35 +179,29 @@
 }
 
 bool RtpToNtpEstimator::Estimate(int64_t rtp_timestamp,
-                                 int64_t* rtp_timestamp_ms) const {
-  if (!params_calculated_)
+                                 int64_t* ntp_timestamp_ms) const {
+  if (!params_)
     return false;
 
   int64_t rtp_timestamp_unwrapped = unwrapper_.Unwrap(rtp_timestamp);
 
-  Parameters params = smoothing_filter_.GetFilteredValue();
-
   // params_calculated_ should not be true unless ms params.frequency_khz has
   // been calculated to something non zero.
-  RTC_DCHECK_NE(params.frequency_khz, 0.0);
+  RTC_DCHECK_NE(params_->frequency_khz, 0.0);
   double rtp_ms =
-      (static_cast<double>(rtp_timestamp_unwrapped) - params.offset_ms) /
-          params.frequency_khz +
-      0.5f;
+      static_cast<double>(rtp_timestamp_unwrapped) / params_->frequency_khz +
+      params_->offset_ms + 0.5f;
 
   if (rtp_ms < 0)
     return false;
 
-  *rtp_timestamp_ms = rtp_ms;
+  *ntp_timestamp_ms = rtp_ms;
+
   return true;
 }
 
 const absl::optional<RtpToNtpEstimator::Parameters> RtpToNtpEstimator::params()
     const {
-  absl::optional<Parameters> res;
-  if (params_calculated_) {
-    res.emplace(smoothing_filter_.GetFilteredValue());
-  }
-  return res;
+  return params_;
 }
 }  // namespace webrtc
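
Since the regression maps unwrapped RTP timestamps (x) to NTP milliseconds (y), the fitted slope is milliseconds per RTP tick and frequency_khz = 1/slope. A self-contained sketch of the same least-squares math for an ideal 90 kHz clock (plain C++, independent of the WebRTC headers):

#include <cassert>
#include <cmath>
#include <cstddef>

int main() {
  // Two ideal samples, 1 second apart in NTP time and 90'000 ticks apart in
  // RTP time (90 kHz clock).
  const double x[] = {0.0, 90000.0};    // unwrapped RTP timestamps
  const double y[] = {1000.0, 2000.0};  // NTP time in milliseconds
  const size_t n = 2;

  double avg_x = 0, avg_y = 0;
  for (size_t i = 0; i < n; ++i) {
    avg_x += x[i] / n;
    avg_y += y[i] / n;
  }
  double variance_x = 0, covariance_xy = 0;
  for (size_t i = 0; i < n; ++i) {
    variance_x += (x[i] - avg_x) * (x[i] - avg_x);
    covariance_xy += (x[i] - avg_x) * (y[i] - avg_y);
  }
  const double slope = covariance_xy / variance_x;  // ms per RTP tick
  const double offset_ms = avg_y - slope * avg_x;

  assert(std::fabs(1.0 / slope - 90.0) < 1e-9);  // frequency_khz == 90
  assert(std::fabs(offset_ms - 1000.0) < 1e-9);
  // Estimate() then computes: ntp_ms ~= rtp_timestamp / frequency_khz +
  // offset_ms.
  return 0;
}
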
diff --git a/system_wrappers/source/rtp_to_ntp_estimator_unittest.cc b/system_wrappers/source/rtp_to_ntp_estimator_unittest.cc
index 0647ec8..b0b83bb 100644
--- a/system_wrappers/source/rtp_to_ntp_estimator_unittest.cc
+++ b/system_wrappers/source/rtp_to_ntp_estimator_unittest.cc
@@ -9,11 +9,13 @@
  */
 
 #include "system_wrappers/include/rtp_to_ntp_estimator.h"
+#include "rtc_base/random.h"
 #include "test/gtest.h"
 
 namespace webrtc {
 namespace {
 const uint32_t kOneMsInNtpFrac = 4294967;
+const uint32_t kOneHourInNtpSec = 60 * 60;
 const uint32_t kTimestampTicksPerMs = 90;
 }  // namespace
 
@@ -224,6 +226,22 @@
       estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
 }
 
+TEST(UpdateRtcpMeasurementTests, FailsForTooNewNtp) {
+  RtpToNtpEstimator estimator;
+  uint32_t ntp_sec = 1;
+  uint32_t ntp_frac = 699925050;
+  uint32_t timestamp = 0x12345678;
+  bool new_sr;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  EXPECT_TRUE(new_sr);
+  // NTP time from the far future; the list is not updated.
+  ntp_sec += kOneHourInNtpSec * 2;
+  timestamp += kTimestampTicksPerMs * 10;
+  EXPECT_FALSE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+}
+
 TEST(UpdateRtcpMeasurementTests, FailsForEqualTimestamp) {
   RtpToNtpEstimator estimator;
   uint32_t ntp_sec = 0;
@@ -292,4 +310,37 @@
   EXPECT_FALSE(estimator.Estimate(timestamp, &timestamp_ms));
 }
 
+TEST(RtpToNtpTests, AveragesErrorOut) {
+  RtpToNtpEstimator estimator;
+  uint32_t ntp_sec = 1;
+  uint32_t ntp_frac = 90000000;  // More than 1 ms.
+  uint32_t timestamp = 0x12345678;
+  const int kNtpSecStep = 1;  // 1 second.
+  const int kRtpTicksPerMs = 90;
+  const int kRtpStep = kRtpTicksPerMs * 1000;
+  bool new_sr;
+  EXPECT_TRUE(
+      estimator.UpdateMeasurements(ntp_sec, ntp_frac, timestamp, &new_sr));
+  EXPECT_TRUE(new_sr);
+
+  Random rand(1123536L);
+  for (size_t i = 0; i < 1000; i++) {
+    // Advance both timestamps by exactly 1 second.
+    ntp_sec += kNtpSecStep;
+    timestamp += kRtpStep;
+    // Add up to 1 ms of error to the NTP and RTP timestamps passed in.
+    EXPECT_TRUE(estimator.UpdateMeasurements(
+        ntp_sec,
+        ntp_frac + rand.Rand(-static_cast<int>(kOneMsInNtpFrac),
+                             static_cast<int>(kOneMsInNtpFrac)),
+        timestamp + rand.Rand(-kRtpTicksPerMs, kRtpTicksPerMs), &new_sr));
+    EXPECT_TRUE(new_sr);
+
+    int64_t estimated_ntp_ms;
+    EXPECT_TRUE(estimator.Estimate(timestamp, &estimated_ntp_ms));
+    // Allow up to 2 ms of error.
+    EXPECT_NEAR(NtpTime(ntp_sec, ntp_frac).ToMs(), estimated_ntp_ms, 2);
+  }
+}
+
 };  // namespace webrtc
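
For reference, the kOneMsInNtpFrac constant used in these tests is one millisecond expressed in NTP fractional units (1/2^32 of a second):

// 2^32 fractional units per second, so 1 ms = 2^32 / 1000 ~= 4294967.296,
// truncated to the integer constant used above.
static_assert((1LL << 32) / 1000 == 4294967, "1 ms in NTP fractional units");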